-
- for c := 0; c < 256; c++ {
- var t octetType
- isCtl := c <= 31 || c == 127
- isChar := 0 <= c && c <= 127
- isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
- if strings.ContainsRune(" \t\r\n", rune(c)) {
- t |= isSpace
- }
- if isChar && !isCtl && !isSeparator {
- t |= isToken
- }
- octetTypes[c] = t
- }
-}
-
-// AcceptSpec describes an Accept* header.
-type AcceptSpec struct {
- Value string
- Q float64
-}
-
-// ParseAccept parses Accept* headers.
-func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
-loop:
- for _, s := range header[key] {
- for {
- var spec AcceptSpec
- spec.Value, s = expectTokenSlash(s)
- if spec.Value == "" {
- continue loop
- }
- spec.Q = 1.0
- s = skipSpace(s)
- if strings.HasPrefix(s, ";") {
- s = skipSpace(s[1:])
- if !strings.HasPrefix(s, "q=") {
- continue loop
- }
- spec.Q, s = expectQuality(s[2:])
- if spec.Q < 0.0 {
- continue loop
- }
- }
- specs = append(specs, spec)
- s = skipSpace(s)
- if !strings.HasPrefix(s, ",") {
- continue loop
- }
- s = skipSpace(s[1:])
- }
- }
- return
-}
-
-func skipSpace(s string) (rest string) {
- i := 0
- for ; i < len(s); i++ {
- if octetTypes[s[i]]&isSpace == 0 {
- break
- }
- }
- return s[i:]
-}
-
-func expectTokenSlash(s string) (token, rest string) {
- i := 0
- for ; i < len(s); i++ {
- b := s[i]
- if (octetTypes[b]&isToken == 0) && b != '/' {
- break
- }
- }
- return s[:i], s[i:]
-}
-
-func expectQuality(s string) (q float64, rest string) {
- switch {
- case len(s) == 0:
- return -1, ""
- case s[0] == '0':
- q = 0
- case s[0] == '1':
- q = 1
- default:
- return -1, ""
- }
- s = s[1:]
- if !strings.HasPrefix(s, ".") {
- return q, s
- }
- s = s[1:]
- i := 0
- n := 0
- d := 1
- for ; i < len(s); i++ {
- b := s[i]
- if b < '0' || b > '9' {
- break
- }
- n = n*10 + int(b) - '0'
- d *= 10
- }
- return q + float64(n)/float64(d), s[i:]
-}
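
For context, the file above implements Accept-* header parsing for the vendored gddo httputil code. A minimal sketch of how ParseAccept behaves, with hypothetical header values and the usual fmt/net/http imports assumed (note the package sits under internal/, so it is importable only from within client_golang itself):

	h := http.Header{}
	h.Add("Accept-Encoding", "gzip;q=0.8, identity;q=0.5, *;q=0")
	for _, spec := range header.ParseAccept(h, "Accept-Encoding") {
		fmt.Printf("%s q=%.1f\n", spec.Value, spec.Q)
	}
	// Output: gzip q=0.8, identity q=0.5, * q=0.0

Per expectQuality above, only q-values starting with 0 or 1 are accepted; a malformed q parameter yields -1 and the spec is skipped.
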
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
deleted file mode 100644
index 2e45780b7..000000000
--- a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-package httputil
-
-import (
- "net/http"
-
- "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
-)
-
-// NegotiateContentEncoding returns the best offered content encoding for the
-// request's Accept-Encoding header. If two offers match with equal weight,
-// then the offer earlier in the list is preferred. If no offers are
-// acceptable, then "" is returned.
-func NegotiateContentEncoding(r *http.Request, offers []string) string {
- bestOffer := "identity"
- bestQ := -1.0
- specs := header.ParseAccept(r.Header, "Accept-Encoding")
- for _, offer := range offers {
- for _, spec := range specs {
- if spec.Q > bestQ &&
- (spec.Value == "*" || spec.Value == offer) {
- bestQ = spec.Q
- bestOffer = offer
- }
- }
- }
- if bestQ == 0 {
- bestOffer = ""
- }
- return bestOffer
-}
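
A sketch of the intended call pattern for NegotiateContentEncoding in an HTTP handler (handler body hypothetical; compress/gzip and net/http imports assumed):

	func handler(w http.ResponseWriter, r *http.Request) {
		// Offers are listed in server preference order; with no
		// Accept-Encoding header the function falls back to "identity".
		switch httputil.NegotiateContentEncoding(r, []string{"gzip", "identity"}) {
		case "gzip":
			w.Header().Set("Content-Encoding", "gzip")
			gz := gzip.NewWriter(w)
			defer gz.Close()
			gz.Write([]byte("compressed body"))
		case "identity":
			w.Write([]byte("plain body"))
		default: // "" means no offer was acceptable
			http.Error(w, "no acceptable encoding", http.StatusNotAcceptable)
		}
	}
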
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
deleted file mode 100644
index 3460f0346..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
deleted file mode 100644
index c67ff1b7f..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ /dev/null
@@ -1 +0,0 @@
-See <https://pkg.go.dev/github.com/prometheus/client_golang/prometheus>.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
deleted file mode 100644
index 450189f35..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "runtime/debug"
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
- path, version, sum := "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- c := &selfCollector{MustNewConstMetric(
- NewDesc(
- "go_build_info",
- "Build information about the main Go module.",
- nil, Labels{"path": path, "version": version, "checksum": sum},
- ),
- GaugeValue, 1)}
- c.init(c.self)
- return c
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
deleted file mode 100644
index cf05079fb..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Collector is the interface implemented by anything that can be used by
-// Prometheus to collect metrics. A Collector has to be registered for
-// collection. See Registerer.Register.
-//
-// The stock metrics provided by this package (Gauge, Counter, Summary,
-// Histogram, Untyped) are also Collectors (which only ever collect one metric,
-// namely itself). An implementer of Collector may, however, collect multiple
-// metrics in a coordinated fashion and/or create metrics on the fly. Examples
-// for collectors already implemented in this library are the metric vectors
-// (i.e. collection of multiple instances of the same Metric but with different
-// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
-type Collector interface {
- // Describe sends the super-set of all possible descriptors of metrics
- // collected by this Collector to the provided channel and returns once
- // the last descriptor has been sent. The sent descriptors fulfill the
- // consistency and uniqueness requirements described in the Desc
- // documentation.
- //
- // It is valid if one and the same Collector sends duplicate
- // descriptors. Those duplicates are simply ignored. However, two
- // different Collectors must not send duplicate descriptors.
- //
- // Sending no descriptor at all marks the Collector as “unchecked”,
- // i.e. no checks will be performed at registration time, and the
- // Collector may yield any Metric it sees fit in its Collect method.
- //
- // This method idempotently sends the same descriptors throughout the
- // lifetime of the Collector. It may be called concurrently and
- // therefore must be implemented in a concurrency safe way.
- //
- // If a Collector encounters an error while executing this method, it
- // must send an invalid descriptor (created with NewInvalidDesc) to
- // signal the error to the registry.
- Describe(chan<- *Desc)
- // Collect is called by the Prometheus registry when collecting
- // metrics. The implementation sends each collected metric via the
- // provided channel and returns once the last metric has been sent. The
- // descriptor of each sent metric is one of those returned by Describe
- // (unless the Collector is unchecked, see above). Returned metrics that
- // share the same descriptor must differ in their variable label
- // values.
- //
- // This method may be called concurrently and must therefore be
- // implemented in a concurrency safe way. Blocking occurs at the expense
- // of total performance of rendering all registered metrics. Ideally,
- // Collector implementations support concurrent readers.
- Collect(chan<- Metric)
-}
-
-// DescribeByCollect is a helper to implement the Describe method of a custom
-// Collector. It collects the metrics from the provided Collector and sends
-// their descriptors to the provided channel.
-//
-// If a Collector collects the same metrics throughout its lifetime, its
-// Describe method can simply be implemented as:
-//
-// func (c customCollector) Describe(ch chan<- *Desc) {
-// DescribeByCollect(c, ch)
-// }
-//
-// However, this will not work if the metrics collected change dynamically over
-// the lifetime of the Collector in a way that their combined set of descriptors
-// changes as well. The shortcut implementation will then violate the contract
-// of the Describe method. If a Collector sometimes collects no metrics at all
-// (for example vectors like CounterVec, GaugeVec, etc., which only collect
-// metrics after a metric with a fully specified label set has been accessed),
-// it might even get registered as an unchecked Collector (cf. the Register
-// method of the Registerer interface). Hence, only use this shortcut
-// implementation of Describe if you are certain to fulfill the contract.
-//
-// The Collector example demonstrates a use of DescribeByCollect.
-func DescribeByCollect(c Collector, descs chan<- *Desc) {
- metrics := make(chan Metric)
- go func() {
- c.Collect(metrics)
- close(metrics)
- }()
- for m := range metrics {
- descs <- m.Desc()
- }
-}
-
-// selfCollector implements Collector for a single Metric so that the Metric
-// collects itself. Add it as an anonymous field to a struct that implements
-// Metric, and call init with the Metric itself as an argument.
-type selfCollector struct {
- self Metric
-}
-
-// init provides the selfCollector with a reference to the metric it is supposed
-// to collect. It is usually called within the factory function to create a
-// metric. See example.
-func (c *selfCollector) init(self Metric) {
- c.self = self
-}
-
-// Describe implements Collector.
-func (c *selfCollector) Describe(ch chan<- *Desc) {
- ch <- c.self.Desc()
-}
-
-// Collect implements Collector.
-func (c *selfCollector) Collect(ch chan<- Metric) {
- ch <- c.self
-}
-
-// collectorMetric is a metric that is also a collector.
-// Because of selfCollector, most (if not all) Metrics in
-// this package are also collectors.
-type collectorMetric interface {
- Metric
- Collector
-}
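
To make the Collector contract above concrete, a minimal custom Collector built on DescribeByCollect (all names hypothetical):

	type queueCollector struct {
		depth *prometheus.Desc
		queue func() float64 // live source of the value
	}

	// Describe can delegate to Collect here because the collector always
	// produces the same single descriptor over its lifetime.
	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
		prometheus.DescribeByCollect(c, ch)
	}

	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, c.queue())
	}
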
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
deleted file mode 100644
index 4ce84e7a8..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "math"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// Counter is a Metric that represents a single numerical value that only ever
-// goes up. That implies that it cannot be used to count items whose number can
-// also go down, e.g. the number of currently running goroutines. Those
-// "counters" are represented by Gauges.
-//
-// A Counter is typically used to count requests served, tasks completed, errors
-// occurred, etc.
-//
-// To create Counter instances, use NewCounter.
-type Counter interface {
- Metric
- Collector
-
- // Inc increments the counter by 1. Use Add to increment it by arbitrary
- // non-negative values.
- Inc()
- // Add adds the given value to the counter. It panics if the value is <
- // 0.
- Add(float64)
-}
-
-// ExemplarAdder is implemented by Counters that offer the option of adding a
-// value to the Counter together with an exemplar. Its AddWithExemplar method
-// works like the Add method of the Counter interface but also replaces the
-// currently saved exemplar (if any) with a new one, created from the provided
-// value, the current time as timestamp, and the provided labels. Empty Labels
-// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
-// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
-// of the provided labels are invalid, or if the provided labels contain more
-// than 128 runes in total.
-type ExemplarAdder interface {
- AddWithExemplar(value float64, exemplar Labels)
-}
-
-// CounterOpts is an alias for Opts. See there for doc comments.
-type CounterOpts Opts
-
-// CounterVecOpts bundles the options to create a CounterVec metric.
-// It is mandatory to set CounterOpts; see there for mandatory fields. VariableLabels
-// is optional and can safely be left to its default value.
-type CounterVecOpts struct {
- CounterOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// NewCounter creates a new Counter based on the provided CounterOpts.
-//
-// The returned implementation also implements ExemplarAdder. It is safe to
-// perform the corresponding type assertion.
-//
-// The returned implementation tracks the counter value in two separate
-// variables, a float64 and a uint64. The latter is used to track calls of the
-// Inc method and calls of the Add method with a value that can be represented
-// as a uint64. This allows atomic increments of the counter with optimal
-// performance. (It is common to have an Inc call in very hot execution paths.)
-// Both internal tracking values are added up in the Write method. This has to
-// be taken into account when it comes to precision and overflow behavior.
-func NewCounter(opts CounterOpts) Counter {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- )
- if opts.now == nil {
- opts.now = time.Now
- }
- result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
- result.init(result) // Init self-collection.
- result.createdTs = timestamppb.New(opts.now())
- return result
-}
-
-type counter struct {
- // valBits contains the bits of the represented float64 value, while
- // valInt stores values that are exact integers. Both have to go first
- // in the struct to guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
- valInt uint64
-
- selfCollector
- desc *Desc
-
- createdTs *timestamppb.Timestamp
- labelPairs []*dto.LabelPair
- exemplar atomic.Value // Containing nil or a *dto.Exemplar.
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-}
-
-func (c *counter) Desc() *Desc {
- return c.desc
-}
-
-func (c *counter) Add(v float64) {
- if v < 0 {
- panic(errors.New("counter cannot decrease in value"))
- }
-
- ival := uint64(v)
- if float64(ival) == v {
- atomic.AddUint64(&c.valInt, ival)
- return
- }
-
- for {
- oldBits := atomic.LoadUint64(&c.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (c *counter) AddWithExemplar(v float64, e Labels) {
- c.Add(v)
- c.updateExemplar(v, e)
-}
-
-func (c *counter) Inc() {
- atomic.AddUint64(&c.valInt, 1)
-}
-
-func (c *counter) get() float64 {
- fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
- ival := atomic.LoadUint64(&c.valInt)
- return fval + float64(ival)
-}
-
-func (c *counter) Write(out *dto.Metric) error {
- // Read the Exemplar first and the value second. This is to avoid a race condition
- // where users see an exemplar for a not-yet-existing observation.
- var exemplar *dto.Exemplar
- if e := c.exemplar.Load(); e != nil {
- exemplar = e.(*dto.Exemplar)
- }
- val := c.get()
- return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
-}
-
-func (c *counter) updateExemplar(v float64, l Labels) {
- if l == nil {
- return
- }
- e, err := newExemplar(v, c.now(), l)
- if err != nil {
- panic(err)
- }
- c.exemplar.Store(e)
-}
-
-// CounterVec is a Collector that bundles a set of Counters that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. number of HTTP requests, partitioned by response code and
-// method). Create instances with NewCounterVec.
-type CounterVec struct {
- *MetricVec
-}
-
-// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names.
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- return V2.NewCounterVec(CounterVecOpts{
- CounterOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
-
-// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
-func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- if opts.now == nil {
- opts.now = time.Now
- }
- return &CounterVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels.names) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
- }
- result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
- result.init(result) // Init self-collection.
- result.createdTs = timestamppb.New(opts.now())
- return result
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Counter for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Counter is created.
-//
-// It is possible to call this method without using the returned Counter to only
-// create the new Counter but leave it at its starting value 0. See also the
-// SummaryVec example.
-//
-// Keeping the Counter for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Counter from the CounterVec. In that case,
-// the Counter will still exist, but it will not be exported anymore, even if a
-// Counter with the same label values is created later.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Counter for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Counter is created. Implications of
-// creating a Counter without using it and keeping the Counter for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
- c, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return c
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
-func (v *CounterVec) With(labels Labels) Counter {
- c, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return c
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the CounterVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &CounterVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
-
-// CounterFunc is a Counter whose value is determined at collect time by calling a
-// provided function.
-//
-// To create CounterFunc instances, use NewCounterFunc.
-type CounterFunc interface {
- Metric
- Collector
-}
-
-// NewCounterFunc creates a new CounterFunc based on the provided
-// CounterOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a CounterFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe. The function should also honor
-// the contract for a Counter (values only go up, not down), but compliance will
-// not be checked.
-//
-// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
-func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), CounterValue, function)
-}
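
A short usage sketch for the counter API above (metric names hypothetical):

	reqs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Total requests handled.",
	})
	reqs.Inc()    // fast path: tracked atomically in the uint64 field
	reqs.Add(0.5) // non-integer values go through the float64 field

	// NewCounter documents that the ExemplarAdder assertion is safe.
	reqs.(prometheus.ExemplarAdder).AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})

	byCode := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "myapp_responses_total",
		Help: "Responses partitioned by status code.",
	}, []string{"code"})
	byCode.WithLabelValues("404").Inc()
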
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
deleted file mode 100644
index 68ffe3c24..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/cespare/xxhash/v2"
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/model"
- "google.golang.org/protobuf/proto"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-)
-
-// Desc is the descriptor used by every Prometheus Metric. It is essentially
-// the immutable meta-data of a Metric. The normal Metric implementations
-// included in this package manage their Desc under the hood. Users only have to
-// deal with Desc if they use advanced features like the ExpvarCollector or
-// custom Collectors and Metrics.
-//
-// Descriptors registered with the same registry have to fulfill certain
-// consistency and uniqueness criteria if they share the same fully-qualified
-// name: They must have the same help string and the same label names (aka label
-// dimensions) in each, constLabels and variableLabels, but they must differ in
-// the values of the constLabels.
-//
-// Descriptors that share the same fully-qualified names and the same label
-// values of their constLabels are considered equal.
-//
-// Use NewDesc to create new Desc instances.
-type Desc struct {
- // fqName has been built from Namespace, Subsystem, and Name.
- fqName string
- // help provides some helpful information about this metric.
- help string
- // constLabelPairs contains precalculated DTO label pairs based on
- // the constant labels.
- constLabelPairs []*dto.LabelPair
- // variableLabels contains names of labels and normalization function for
- // which the metric maintains variable values.
- variableLabels *compiledLabels
- // id is a hash of the values of the ConstLabels and fqName. This
- // must be unique among all registered descriptors and can therefore be
- // used as an identifier of the descriptor.
- id uint64
- // dimHash is a hash of the label names (preset and variable) and the
- // Help string. Each Desc with the same fqName must have the same
- // dimHash.
- dimHash uint64
- // err is an error that occurred during construction. It is reported at
- // registration time.
- err error
-}
-
-// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
-// and will be reported at registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName must not be empty.
-//
-// variableLabels only contain the label names. Their label values are variable
-// and therefore not part of the Desc. (They are managed within the Metric.)
-//
-// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Collector example for a usage pattern.
-func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
- return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
-}
-
-// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
-// and will be reported at registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName must not be empty.
-//
-// variableLabels only contain the label names and normalization functions. Their
-// label values are variable and therefore not part of the Desc. (They are managed
-// within the Metric.)
-//
-// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Collector example for a usage pattern.
-func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
- d := &Desc{
- fqName: fqName,
- help: help,
- variableLabels: variableLabels.compile(),
- }
- if !model.IsValidMetricName(model.LabelValue(fqName)) {
- d.err = fmt.Errorf("%q is not a valid metric name", fqName)
- return d
- }
- // labelValues contains the label values of const labels (in order of
- // their sorted label names) plus the fqName (at position 0).
- labelValues := make([]string, 1, len(constLabels)+1)
- labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
- labelNameSet := map[string]struct{}{}
- // First add only the const label names and sort them...
- for labelName := range constLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
- return d
- }
- labelNames = append(labelNames, labelName)
- labelNameSet[labelName] = struct{}{}
- }
- sort.Strings(labelNames)
- // ... so that we can now add const label values in the order of their names.
- for _, labelName := range labelNames {
- labelValues = append(labelValues, constLabels[labelName])
- }
- // Validate the const label values. They can't have a wrong cardinality, so
- // use len(labelValues) as expectedNumberOfValues.
- if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
- d.err = err
- return d
- }
- // Now add the variable label names, but prefix them with something that
- // cannot be in a regular label name. That prevents matching the label
- // dimension with a different mix between preset and variable labels.
- for _, label := range d.variableLabels.names {
- if !checkLabelName(label) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
- return d
- }
- labelNames = append(labelNames, "$"+label)
- labelNameSet[label] = struct{}{}
- }
- if len(labelNames) != len(labelNameSet) {
- d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
- return d
- }
-
- xxh := xxhash.New()
- for _, val := range labelValues {
- xxh.WriteString(val)
- xxh.Write(separatorByteSlice)
- }
- d.id = xxh.Sum64()
- // Sort labelNames so that order doesn't matter for the hash.
- sort.Strings(labelNames)
- // Now hash together (in this order) the help string and the sorted
- // label names.
- xxh.Reset()
- xxh.WriteString(help)
- xxh.Write(separatorByteSlice)
- for _, labelName := range labelNames {
- xxh.WriteString(labelName)
- xxh.Write(separatorByteSlice)
- }
- d.dimHash = xxh.Sum64()
-
- d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
- for n, v := range constLabels {
- d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
- Name: proto.String(n),
- Value: proto.String(v),
- })
- }
- sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
- return d
-}
-
-// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
-// provided error set. If a collector returning such a descriptor is registered,
-// registration will fail with the provided error. NewInvalidDesc can be used by
-// a Collector to signal inability to describe itself.
-func NewInvalidDesc(err error) *Desc {
- return &Desc{
- err: err,
- }
-}
-
-func (d *Desc) String() string {
- lpStrings := make([]string, 0, len(d.constLabelPairs))
- for _, lp := range d.constLabelPairs {
- lpStrings = append(
- lpStrings,
- fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
- )
- }
- vlStrings := make([]string, 0, len(d.variableLabels.names))
- for _, vl := range d.variableLabels.names {
- if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
- vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
- } else {
- vlStrings = append(vlStrings, vl)
- }
- }
- return fmt.Sprintf(
- "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
- d.fqName,
- d.help,
- strings.Join(lpStrings, ","),
- strings.Join(vlStrings, ","),
- )
-}
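
A sketch of constructing a Desc directly, as custom Collectors do (names hypothetical):

	desc := prometheus.NewDesc(
		prometheus.BuildFQName("myapp", "worker", "tasks_total"), // "myapp_worker_tasks_total"
		"Tasks processed, partitioned by kind.",
		[]string{"kind"},                // variable labels; values supplied per metric
		prometheus.Labels{"shard": "0"}, // const labels; fixed in the Desc
	)
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 7, "resize")
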
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
deleted file mode 100644
index 962608f02..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package prometheus is the core instrumentation package. It provides metrics
-// primitives to instrument code for monitoring. It also offers a registry for
-// metrics. Sub-packages allow exposing the registered metrics via HTTP
-// (package promhttp) or push them to a Pushgateway (package push). There is
-// also a sub-package promauto, which provides metrics constructors with
-// automatic registration.
-//
-// All exported functions and methods are safe to be used concurrently unless
-// specified otherwise.
-//
-// # A Basic Example
-//
-// As a starting point, a very basic usage example:
-//
-// package main
-//
-// import (
-// "log"
-// "net/http"
-//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
-//
-// type metrics struct {
-// cpuTemp prometheus.Gauge
-// hdFailures *prometheus.CounterVec
-// }
-//
-// func NewMetrics(reg prometheus.Registerer) *metrics {
-// m := &metrics{
-// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// }),
-// hdFailures: prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// ),
-// }
-// reg.MustRegister(m.cpuTemp)
-// reg.MustRegister(m.hdFailures)
-// return m
-// }
-//
-// func main() {
-// // Create a non-global registry.
-// reg := prometheus.NewRegistry()
-//
-// // Create new metrics and register them using the custom registry.
-// m := NewMetrics(reg)
-// // Set values for the new created metrics.
-// m.cpuTemp.Set(65.3)
-// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
-//
-// // Expose metrics and custom registry via an HTTP server
-// // using the HandlerFor function. "/metrics" is the usual endpoint for that.
-// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
-// log.Fatal(http.ListenAndServe(":8080", nil))
-// }
-//
-// This is a complete program that exports two metrics, a Gauge and a Counter,
-// the latter with a label attached to turn it into a (one-dimensional) vector.
-// It registers the metrics using a custom registry and exposes them via an HTTP server
-// on the /metrics endpoint.
-//
-// # Metrics
-//
-// The number of exported identifiers in this package might appear a bit
-// overwhelming. However, in addition to the basic plumbing shown in the example
-// above, you only need to understand the different metric types and their
-// vector versions for basic usage. Furthermore, if you are not concerned with
-// fine-grained control of when and how to register metrics with the registry,
-// have a look at the promauto package, which will effectively allow you to
-// ignore registration altogether in simple cases.
-//
-// Above, you have already touched the Counter and the Gauge. There are two more
-// advanced metric types: the Summary and Histogram. A more thorough description
-// of those four metric types can be found in the Prometheus docs:
-// https://prometheus.io/docs/concepts/metric_types/
-//
-// In addition to the fundamental metric types Gauge, Counter, Summary, and
-// Histogram, a very important part of the Prometheus data model is the
-// partitioning of samples along dimensions called labels, which results in
-// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec.
-//
-// While only the fundamental metric types implement the Metric interface, both
-// the metrics and their vector versions implement the Collector interface. A
-// Collector manages the collection of a number of Metrics, but for convenience,
-// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
-// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec are not.
-//
-// To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
-//
-// # Custom Collectors and constant Metrics
-//
-// While you could create your own implementations of Metric, most likely you
-// will only ever implement the Collector interface on your own. At a first
-// glance, a custom Collector seems handy to bundle Metrics for common
-// registration (with the prime example of the different metric vectors above,
-// which bundle all the metrics of the same name but with different labels).
-//
-// There is a more involved use case, too: If you already have metrics
-// available, created outside of the Prometheus context, you don't need the
-// interface of the various Metric types. You essentially want to mirror the
-// existing numbers into Prometheus Metrics during collection. Your own
-// implementation of the Collector interface is perfect for that. You can create
-// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
-// NewConstSummary (and their respective Must… versions). NewConstMetric is used
-// for all metric types with just a float64 as their value: Counter, Gauge, and
-// a special “type” called Untyped. Use the latter if you are not sure if the
-// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
-// happens in the Collect method. The Describe method has to return separate
-// Desc instances, representative of the “throw-away” metrics to be created
-// later. NewDesc comes in handy to create those Desc instances. Alternatively,
-// you could return no Desc at all, which will mark the Collector “unchecked”.
-// No checks are performed at registration time, but metric consistency will
-// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
-// errors. Thus, with unchecked Collectors, the responsibility to not collect
-// metrics that lead to inconsistencies in the total scrape result lies with the
-// implementer of the Collector. While this is not a desirable state, it is
-// sometimes necessary. The typical use case is a situation where the exact
-// metrics to be returned by a Collector cannot be predicted at registration
-// time, but the implementer has sufficient knowledge of the whole system to
-// guarantee metric consistency.
-//
-// The Collector example illustrates the use case. You can also look at the
-// source code of the processCollector (mirroring process metrics), the
-// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
-// metrics) as examples that are used in this package itself.
-//
-// If you just need to call a function to get a single float value to collect as
-// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
-// shortcuts.
-//
-// # Advanced Uses of the Registry
-//
-// While MustRegister is by far the most common way of registering a Collector,
-// sometimes you might want to handle the errors the registration might cause.
-// As suggested by the name, MustRegister panics if an error occurs. With the
-// Register function, the error is returned and can be handled.
-//
-// An error is returned if the registered Collector is incompatible or
-// inconsistent with already registered metrics. The registry aims for
-// consistency of the collected metrics according to the Prometheus data model.
-// Inconsistencies are ideally detected at registration time, not at collect
-// time. The former will usually be detected at start-up time of a program,
-// while the latter will only happen at scrape time, possibly not even on the
-// first scrape if the inconsistency only becomes relevant later. That is the
-// main reason why a Collector and a Metric have to describe themselves to the
-// registry.
-//
-// So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegisterer variable. With NewRegistry, you
-// can create a custom registry, or you can even implement the Registerer or
-// Gatherer interfaces yourself. The methods Register and Unregister work in the
-// same way on a custom registry as the global functions Register and Unregister
-// on the default registry.
-//
-// There are a number of uses for custom registries: You can use registries with
-// special properties, see NewPedanticRegistry. You can avoid global state, as
-// it is imposed by the DefaultRegisterer. You can use multiple registries at
-// the same time to expose different metrics in different ways. You can use
-// separate registries for testing purposes.
-//
-// Also note that the DefaultRegisterer comes registered with a Collector for Go
-// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
-// NewProcessCollector). With a custom registry, you are in control and decide
-// yourself about the Collectors to register.
-//
-// # HTTP Exposition
-//
-// The Registry implements the Gatherer interface. The caller of the Gather
-// method can then expose the gathered metrics in some way. Usually, the metrics
-// are served via HTTP on the /metrics endpoint. That's happening in the example
-// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
-//
-// # Pushing to the Pushgateway
-//
-// Functions for pushing to the Pushgateway can be found in the push sub-package.
-//
-// # Graphite Bridge
-//
-// Functions and examples to push metrics from a Gatherer to Graphite can be
-// found in the graphite sub-package.
-//
-// # Other Means of Exposition
-//
-// More ways of exposing metrics can easily be added by following the approaches
-// of the existing implementations.
-package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
deleted file mode 100644
index de5a85629..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "encoding/json"
- "expvar"
-)
-
-type expvarCollector struct {
- exports map[string]*Desc
-}
-
-// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewExpvarCollector instead.
-func NewExpvarCollector(exports map[string]*Desc) Collector {
- return &expvarCollector{
- exports: exports,
- }
-}
-
-// Describe implements Collector.
-func (e *expvarCollector) Describe(ch chan<- *Desc) {
- for _, desc := range e.exports {
- ch <- desc
- }
-}
-
-// Collect implements Collector.
-func (e *expvarCollector) Collect(ch chan<- Metric) {
- for name, desc := range e.exports {
- var m Metric
- expVar := expvar.Get(name)
- if expVar == nil {
- continue
- }
- var v interface{}
- labels := make([]string, len(desc.variableLabels.names))
- if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
- ch <- NewInvalidMetric(desc, err)
- continue
- }
- var processValue func(v interface{}, i int)
- processValue = func(v interface{}, i int) {
- if i >= len(labels) {
- copiedLabels := append(make([]string, 0, len(labels)), labels...)
- switch v := v.(type) {
- case float64:
- m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
- case bool:
- if v {
- m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
- } else {
- m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
- }
- default:
- return
- }
- ch <- m
- return
- }
- vm, ok := v.(map[string]interface{})
- if !ok {
- return
- }
- for lv, val := range vm {
- labels[i] = lv
- processValue(val, i+1)
- }
- }
- processValue(v, 0)
- }
-}
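
A sketch of the deprecated entry point above (the supported equivalent lives in the collectors package; expvar import assumed):

	hits := expvar.NewInt("hits") // published via the standard expvar package
	hits.Add(1)

	c := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"hits": prometheus.NewDesc("expvar_hits", "Hit count mirrored from expvar.", nil, nil),
	})
	prometheus.MustRegister(c)

Because the expvar.Int renders as a bare JSON number, Collect exports it as a single untyped metric with no labels.
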
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
deleted file mode 100644
index 3d383a735..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
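
These helpers are unexported and only callable inside the package, where they hash label values; a sketch of the intended call pattern (the 0xff separator byte is an assumption for illustration):

	h := hashNew()
	h = hashAdd(h, "get")    // first label value
	h = hashAddByte(h, 0xff) // separator between values
	h = hashAdd(h, "200")    // second label value
	// h is now an FNV-1a hash identifying the ("get", "200") pairing.
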
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
deleted file mode 100644
index dd2eac940..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "math"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Gauge is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// A Gauge is typically used for measured values like temperatures or current
-// memory usage, but also "counts" that can go up and down, like the number of
-// running goroutines.
-//
-// To create Gauge instances, use NewGauge.
-type Gauge interface {
- Metric
- Collector
-
- // Set sets the Gauge to an arbitrary value.
- Set(float64)
- // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
- // values.
- Inc()
- // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
- // values.
- Dec()
- // Add adds the given value to the Gauge. (The value can be negative,
- // resulting in a decrease of the Gauge.)
- Add(float64)
- // Sub subtracts the given value from the Gauge. (The value can be
- // negative, resulting in an increase of the Gauge.)
- Sub(float64)
-
- // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
- SetToCurrentTime()
-}
-
-// GaugeOpts is an alias for Opts. See there for doc comments.
-type GaugeOpts Opts
-
-// GaugeVecOpts bundles the options to create a GaugeVec metric.
-// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
-// is optional and can safely be left to its default value.
-type GaugeVecOpts struct {
- GaugeOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// NewGauge creates a new Gauge based on the provided GaugeOpts.
-//
-// The returned implementation is optimized for a fast Set method. If you have a
-// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
-// the former. For example, the Inc method of the returned Gauge is slower than
-// the Inc method of a Counter returned by NewCounter. This matches the typical
-// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
-// the latter Inc-heavy.
-func NewGauge(opts GaugeOpts) Gauge {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- )
- result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
- result.init(result) // Init self-collection.
- return result
-}
-
-type gauge struct {
- // valBits contains the bits of the represented float64 value. It has
- // to go first in the struct to guarantee alignment for atomic
- // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
-
- selfCollector
-
- desc *Desc
- labelPairs []*dto.LabelPair
-}
-
-func (g *gauge) Desc() *Desc {
- return g.desc
-}
-
-func (g *gauge) Set(val float64) {
- atomic.StoreUint64(&g.valBits, math.Float64bits(val))
-}
-
-func (g *gauge) SetToCurrentTime() {
- g.Set(float64(time.Now().UnixNano()) / 1e9)
-}
-
-func (g *gauge) Inc() {
- g.Add(1)
-}
-
-func (g *gauge) Dec() {
- g.Add(-1)
-}
-
-func (g *gauge) Add(val float64) {
- for {
- oldBits := atomic.LoadUint64(&g.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
- if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (g *gauge) Sub(val float64) {
- g.Add(val * -1)
-}
-
-func (g *gauge) Write(out *dto.Metric) error {
- val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
- return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
-}
-
-// GaugeVec is a Collector that bundles a set of Gauges that all share the same
-// Desc, but have different values for their variable labels. This is used if
-// you want to count the same thing partitioned by various dimensions
-// (e.g. number of operations queued, partitioned by user and operation
-// type). Create instances with NewGaugeVec.
-type GaugeVec struct {
- *MetricVec
-}
-
-// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names.
-func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- return V2.NewGaugeVec(GaugeVecOpts{
- GaugeOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
-
-// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
-func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- return &GaugeVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels.names) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
- }
- result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
- result.init(result) // Init self-collection.
- return result
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Gauge for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Gauge is created.
-//
-// It is possible to call this method without using the returned Gauge to only
-// create the new Gauge but leave it at its starting value 0. See also the
-// SummaryVec example.
-//
-// Keeping the Gauge for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
-// Gauge will still exist, but it will not be exported anymore, even if a
-// Gauge with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Gauge for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Gauge is created. Implications of
-// creating a Gauge without using it and keeping the Gauge for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- g, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return g
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
-func (v *GaugeVec) With(labels Labels) Gauge {
- g, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return g
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the GaugeVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one of them must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &GaugeVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
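-
-// Illustrative sketch (hypothetical metric and label names, not part of the
-// vendored file): currying pins the "handler" label so call sites only supply
-// the remaining "method" label.
-func exampleCurryWith() {
-	inFlight := NewGaugeVec(GaugeOpts{
-		Name: "example_in_flight_requests",
-		Help: "In-flight requests by handler and method.",
-	}, []string{"handler", "method"})
-	apiInFlight := inFlight.MustCurryWith(Labels{"handler": "/api"})
-	apiInFlight.WithLabelValues("GET").Inc()
-	// Equivalent call on the uncurried vector:
-	inFlight.WithLabelValues("/api", "GET").Inc()
-}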
-
-// GaugeFunc is a Gauge whose value is determined at collect time by calling a
-// provided function.
-//
-// To create GaugeFunc instances, use NewGaugeFunc.
-type GaugeFunc interface {
- Metric
- Collector
-}
-
-// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
-// value reported is determined by calling the given function from within the
-// Write method. Take into account that metric collection may happen
-// concurrently. Therefore, it must be safe to call the provided function
-// concurrently.
-//
-// NewGaugeFunc is a good way to create an “info” style metric with a constant
-// value of 1. Example:
-// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), GaugeValue, function)
-}
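-
-// Illustrative sketch (hypothetical metric name and version label, not part of
-// the vendored file): an "info" style GaugeFunc with a constant value of 1, as
-// suggested above. The callback must be safe for concurrent calls.
-func exampleInfoGaugeFunc() GaugeFunc {
-	return NewGaugeFunc(GaugeOpts{
-		Name:        "example_build_info",
-		Help:        "Build information, constant 1.",
-		ConstLabels: Labels{"version": "v0.0.0"},
-	}, func() float64 { return 1 })
-}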
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
deleted file mode 100644
index 614fd61be..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !js || wasm
-// +build !js wasm
-
-package prometheus
-
-import "os"
-
-func getPIDFn() func() (int, error) {
- pid := os.Getpid()
- return func() (int, error) {
- return pid, nil
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
deleted file mode 100644
index eaf8059ee..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build js && !wasm
-// +build js,!wasm
-
-package prometheus
-
-func getPIDFn() func() (int, error) {
- return func() (int, error) {
- return 1, nil
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
deleted file mode 100644
index 520cbd7d4..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "runtime"
- "runtime/debug"
- "time"
-)
-
-// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
-// From Go 1.17, similar (and better) statistics are provided by runtime/metrics, so
-// while each eval closure still works on runtime.MemStats, from Go 1.17+ that struct
-// is populated using runtime/metrics. These are defaults we can't alter.
-func goRuntimeMemStats() memStatsMetrics {
- return memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
-				"Number of bytes obtained from system. Equals to /memory/classes/total:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
-				// TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback first; keeping both is kind of a waste of bytes for everybody, but for compatibility reasons we can't really rename/remove a useful metric.
-				"Total number of heap objects allocated, both live and gc-ed. Semantically a counter version of the go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
-				"Total number of heap objects freed. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
-				"Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- },
- }
-}
-
-type baseGoCollector struct {
- goroutinesDesc *Desc
- threadsDesc *Desc
- gcDesc *Desc
- gcLastTimeDesc *Desc
- goInfoDesc *Desc
-}
-
-func newBaseGoCollector() baseGoCollector {
- return baseGoCollector{
- goroutinesDesc: NewDesc(
- "go_goroutines",
- "Number of goroutines that currently exist.",
- nil, nil),
- threadsDesc: NewDesc(
- "go_threads",
- "Number of OS threads created.",
- nil, nil),
- gcDesc: NewDesc(
- "go_gc_duration_seconds",
- "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
- nil, nil),
- gcLastTimeDesc: NewDesc(
- "go_memstats_last_gc_time_seconds",
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil),
- goInfoDesc: NewDesc(
- "go_info",
- "Information about the Go environment.",
- nil, Labels{"version": runtime.Version()}),
- }
-}
-
-// Describe returns all descriptions of the collector.
-func (c *baseGoCollector) Describe(ch chan<- *Desc) {
- ch <- c.goroutinesDesc
- ch <- c.threadsDesc
- ch <- c.gcDesc
- ch <- c.gcLastTimeDesc
- ch <- c.goInfoDesc
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *baseGoCollector) Collect(ch chan<- Metric) {
- ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
-
- n := getRuntimeNumThreads()
- ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
-
- var stats debug.GCStats
- stats.PauseQuantiles = make([]time.Duration, 5)
- debug.ReadGCStats(&stats)
-
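-	// With 5 PauseQuantiles, debug.ReadGCStats fills in the minimum, 25%,
-	// 50%, 75%, and maximum pause times, so the loop below produces the
-	// keys 0.25, 0.5, 0.75, and 1; the minimum is added as quantile 0
-	// after the loop.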
- quantiles := make(map[float64]float64)
- for idx, pq := range stats.PauseQuantiles[1:] {
- quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
- }
- quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
- ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
- ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
- ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-}
-
-func memstatNamespace(s string) string {
- return "go_memstats_" + s
-}
-
-// memStatsMetrics provide description, evaluator, and value type for memstat
-// metrics.
-type memStatsMetrics []struct {
- desc *Desc
- eval func(*runtime.MemStats) float64
- valType ValueType
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
deleted file mode 100644
index 897a6e906..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.17
-// +build !go1.17
-
-package prometheus
-
-import (
- "runtime"
- "sync"
- "time"
-)
-
-type goCollector struct {
- base baseGoCollector
-
- // ms... are memstats related.
- msLast *runtime.MemStats // Previously collected memstats.
- msLastTimestamp time.Time
- msMtx sync.Mutex // Protects msLast and msLastTimestamp.
- msMetrics memStatsMetrics
- msRead func(*runtime.MemStats) // For mocking in tests.
- msMaxWait time.Duration // Wait time for fresh memstats.
- msMaxAge time.Duration // Maximum allowed age of old memstats.
-}
-
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
- msMetrics := goRuntimeMemStats()
- msMetrics = append(msMetrics, struct {
- desc *Desc
- eval func(*runtime.MemStats) float64
- valType ValueType
- }{
-		// This metric is omitted in Go 1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
- })
- return &goCollector{
- base: newBaseGoCollector(),
- msLast: &runtime.MemStats{},
- msRead: runtime.ReadMemStats,
- msMaxWait: time.Second,
- msMaxAge: 5 * time.Minute,
- msMetrics: msMetrics,
- }
-}
-
-// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
- c.base.Describe(ch)
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- var (
- ms = &runtime.MemStats{}
- done = make(chan struct{})
- )
- // Start reading memstats first as it might take a while.
- go func() {
- c.msRead(ms)
- c.msMtx.Lock()
- c.msLast = ms
- c.msLastTimestamp = time.Now()
- c.msMtx.Unlock()
- close(done)
- }()
-
- // Collect base non-memory metrics.
- c.base.Collect(ch)
-
- timer := time.NewTimer(c.msMaxWait)
- select {
- case <-done: // Our own ReadMemStats succeeded in time. Use it.
- timer.Stop() // Important for high collection frequencies to not pile up timers.
- c.msCollect(ch, ms)
- return
- case <-timer.C: // Time out, use last memstats if possible. Continue below.
- }
- c.msMtx.Lock()
- if time.Since(c.msLastTimestamp) < c.msMaxAge {
- // Last memstats are recent enough. Collect from them under the lock.
- c.msCollect(ch, c.msLast)
- c.msMtx.Unlock()
- return
- }
- // If we are here, the last memstats are too old or don't exist. We have
- // to wait until our own ReadMemStats finally completes. For that to
- // happen, we have to release the lock.
- c.msMtx.Unlock()
- <-done
- c.msCollect(ch, ms)
-}
-
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
deleted file mode 100644
index 511746417..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ /dev/null
@@ -1,574 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.17
-// +build go1.17
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "runtime"
- "runtime/metrics"
- "strings"
- "sync"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
-)
-
-const (
- // constants for strings referenced more than once.
- goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
- goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
- goGCHeapFreesObjects = "/gc/heap/frees:objects"
- goGCHeapFreesBytes = "/gc/heap/frees:bytes"
- goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
- goGCHeapObjects = "/gc/heap/objects:objects"
- goGCHeapGoalBytes = "/gc/heap/goal:bytes"
- goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
- goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
- goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
- goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
- goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
- goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
- goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
- goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
- goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
- goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
- goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
- goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
- goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
- goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
-)
-
-// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the MemStats-like metrics produced by goRuntimeMemStats.
-var rmNamesForMemStatsMetrics = []string{
- goGCHeapTinyAllocsObjects,
- goGCHeapAllocsObjects,
- goGCHeapFreesObjects,
- goGCHeapAllocsBytes,
- goGCHeapObjects,
- goGCHeapGoalBytes,
- goMemoryClassesTotalBytes,
- goMemoryClassesHeapObjectsBytes,
- goMemoryClassesHeapUnusedBytes,
- goMemoryClassesHeapReleasedBytes,
- goMemoryClassesHeapFreeBytes,
- goMemoryClassesHeapStacksBytes,
- goMemoryClassesOSStacksBytes,
- goMemoryClassesMetadataMSpanInuseBytes,
- goMemoryClassesMetadataMSPanFreeBytes,
- goMemoryClassesMetadataMCacheInuseBytes,
- goMemoryClassesMetadataMCacheFreeBytes,
- goMemoryClassesProfilingBucketsBytes,
- goMemoryClassesMetadataOtherBytes,
- goMemoryClassesOtherBytes,
-}
-
-func bestEffortLookupRM(lookup []string) []metrics.Description {
- ret := make([]metrics.Description, 0, len(lookup))
- for _, rm := range metrics.All() {
- for _, m := range lookup {
- if m == rm.Name {
- ret = append(ret, rm)
- }
- }
- }
- return ret
-}
-
-type goCollector struct {
- base baseGoCollector
-
- // mu protects updates to all fields ensuring a consistent
- // snapshot is always produced by Collect.
- mu sync.Mutex
-
-	// Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
- sampleBuf []metrics.Sample
- // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
- sampleMap map[string]*metrics.Sample
-
- // rmExposedMetrics represents all runtime/metrics package metrics
- // that were configured to be exposed.
- rmExposedMetrics []collectorMetric
- rmExactSumMapForHist map[string]string
-
- // With Go 1.17, the runtime/metrics package was introduced.
- // From that point on, metric names produced by the runtime/metrics
- // package could be generated from runtime/metrics names. However,
- // these differ from the old names for the same values.
- //
- // This field exists to export the same values under the old names
- // as well.
- msMetrics memStatsMetrics
- msMetricsEnabled bool
-}
-
-type rmMetricDesc struct {
- metrics.Description
-}
-
-func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
- var descs []rmMetricDesc
- for _, d := range metrics.All() {
- var (
- deny = true
- desc rmMetricDesc
- )
-
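-		// The last matching rule wins: the metric is kept only if that
-		// rule has Deny == false. With no matching rule, the metric is
-		// denied by default (deny starts as true).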
- for _, r := range rules {
- if !r.Matcher.MatchString(d.Name) {
- continue
- }
- deny = r.Deny
- }
- if deny {
- continue
- }
-
- desc.Description = d
- descs = append(descs, desc)
- }
- return descs
-}
-
-func defaultGoCollectorOptions() internal.GoCollectorOptions {
- return internal.GoCollectorOptions{
- RuntimeMetricSumForHist: map[string]string{
- "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
- "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
- },
- RuntimeMetricRules: []internal.GoCollectorRule{
- // Recommended metrics we want by default from runtime/metrics.
- {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
- },
- }
-}
-
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
- opt := defaultGoCollectorOptions()
- for _, o := range opts {
- o(&opt)
- }
-
- exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
-
- // Collect all histogram samples so that we can get their buckets.
- // The API guarantees that the buckets are always fixed for the lifetime
- // of the process.
- var histograms []metrics.Sample
- for _, d := range exposedDescriptions {
- if d.Kind == metrics.KindFloat64Histogram {
- histograms = append(histograms, metrics.Sample{Name: d.Name})
- }
- }
-
- if len(histograms) > 0 {
- metrics.Read(histograms)
- }
-
- bucketsMap := make(map[string][]float64)
- for i := range histograms {
- bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
- }
-
- // Generate a collector for each exposed runtime/metrics metric.
- metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
-	// sampleBuf is used for reading from runtime/metrics. It is allocated
-	// with its largest possible capacity up front so that the slice never
-	// grows and the pointers stored in sampleMap stay stable.
- sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
- sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
- for _, d := range exposedDescriptions {
- namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
- if !ok {
- // Just ignore this metric; we can't do anything with it here.
- // If a user decides to use the latest version of Go, we don't want
- // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
- continue
- }
- help := attachOriginalName(d.Description.Description, d.Name)
-
- sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
- sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
-
- var m collectorMetric
- if d.Kind == metrics.KindFloat64Histogram {
- _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
- unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
- m = newBatchHistogram(
- NewDesc(
- BuildFQName(namespace, subsystem, name),
- help,
- nil,
- nil,
- ),
- internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
- hasSum,
- )
- } else if d.Cumulative {
- m = NewCounter(CounterOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: name,
- Help: help,
- },
- )
- } else {
- m = NewGauge(GaugeOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: name,
- Help: help,
- })
- }
- metricSet = append(metricSet, m)
- }
-
- // Add exact sum metrics to sampleBuf if not added before.
- for _, h := range histograms {
- sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
- if !ok {
- continue
- }
-
- if _, ok := sampleMap[sumMetric]; ok {
- continue
- }
- sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
- sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
- }
-
- var (
- msMetrics memStatsMetrics
- msDescriptions []metrics.Description
- )
-
- if !opt.DisableMemStatsLikeMetrics {
- msMetrics = goRuntimeMemStats()
- msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
-
-		// If a metric was not exposed before, add it to sampleBuf.
- for _, mdDesc := range msDescriptions {
- if _, ok := sampleMap[mdDesc.Name]; ok {
- continue
- }
- sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
- sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
- }
- }
-
- return &goCollector{
- base: newBaseGoCollector(),
- sampleBuf: sampleBuf,
- sampleMap: sampleMap,
- rmExposedMetrics: metricSet,
- rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
- msMetrics: msMetrics,
- msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
- }
-}
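-
-// Illustrative sketch (not part of the vendored file): an options function
-// passed to NewGoCollector mutates the defaults in place, here disabling the
-// MemStats-like metrics.
-func exampleGoCollectorWithoutMemStats() Collector {
-	return NewGoCollector(func(o *internal.GoCollectorOptions) {
-		o.DisableMemStatsLikeMetrics = true
-	})
-}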
-
-func attachOriginalName(desc, origName string) string {
- return fmt.Sprintf("%s Sourced from %s", desc, origName)
-}
-
-// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
- c.base.Describe(ch)
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
- for _, m := range c.rmExposedMetrics {
- ch <- m.Desc()
- }
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- // Collect base non-memory metrics.
- c.base.Collect(ch)
-
- if len(c.sampleBuf) == 0 {
- return
- }
-
- // Collect must be thread-safe, so prevent concurrent use of
- // sampleBuf elements. Just read into sampleBuf but write all the data
- // we get into our Metrics or MemStats.
- //
- // This lock also ensures that the Metrics we send out are all from
- // the same updates, ensuring their mutual consistency insofar as
- // is guaranteed by the runtime/metrics package.
- //
- // N.B. This locking is heavy-handed, but Collect is expected to be called
- // relatively infrequently. Also the core operation here, metrics.Read,
- // is fast (O(tens of microseconds)) so contention should certainly be
- // low, though channel operations and any allocations may add to that.
- c.mu.Lock()
- defer c.mu.Unlock()
-
- // Populate runtime/metrics sample buffer.
- metrics.Read(c.sampleBuf)
-
-	// Collect all the runtime/metrics samples the user chose to expose from sampleBuf (if any).
- for i, metric := range c.rmExposedMetrics {
- // We created samples for exposed metrics first in order, so indexes match.
- sample := c.sampleBuf[i]
-
- // N.B. switch on concrete type because it's significantly more efficient
- // than checking for the Counter and Gauge interface implementations. In
- // this case, we control all the types here.
- switch m := metric.(type) {
- case *counter:
- // Guard against decreases. This should never happen, but a failure
- // to do so will result in a panic, which is a harsh consequence for
- // a metrics collection bug.
- v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
- if v1 > v0 {
- m.Add(unwrapScalarRMValue(sample.Value) - m.get())
- }
- m.Collect(ch)
- case *gauge:
- m.Set(unwrapScalarRMValue(sample.Value))
- m.Collect(ch)
- case *batchHistogram:
- m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
- m.Collect(ch)
- default:
- panic("unexpected metric type")
- }
- }
-
- if c.msMetricsEnabled {
-		// ms is a dummy MemStats that we populate ourselves so that the
-		// old MemStats-based metrics can be derived from it while they
-		// are enabled.
- var ms runtime.MemStats
- memStatsFromRM(&ms, c.sampleMap)
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
- }
- }
-}
-
-// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
-// to be scalar and returns the equivalent float64 value. Panics if the
-// value is not scalar.
-func unwrapScalarRMValue(v metrics.Value) float64 {
- switch v.Kind() {
- case metrics.KindUint64:
- return float64(v.Uint64())
- case metrics.KindFloat64:
- return v.Float64()
- case metrics.KindBad:
- // Unsupported metric.
- //
- // This should never happen because we always populate our metric
- // set from the runtime/metrics package.
- panic("unexpected bad kind metric")
- default:
- // Unsupported metric kind.
- //
- // This should never happen because we check for this during initialization
- // and flag and filter metrics whose kinds we don't understand.
- panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
- }
-}
-
-// exactSumFor takes a runtime/metrics metric name (that is assumed to
-// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if
-// no exact sum is available for it.
-//
-// The runtime/metrics API for histograms doesn't currently expose exact
-// sums, but some of the other metrics are in fact exact sums of histograms.
-func (c *goCollector) exactSumFor(rmName string) float64 {
- sumName, ok := c.rmExactSumMapForHist[rmName]
- if !ok {
- return 0
- }
- s, ok := c.sampleMap[sumName]
- if !ok {
- return 0
- }
- return unwrapScalarRMValue(s.Value)
-}
-
-func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
- lookupOrZero := func(name string) uint64 {
- if s, ok := rm[name]; ok {
- return s.Value.Uint64()
- }
- return 0
- }
-
- // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
-	// The reason is that MemStats couldn't be extended at the time,
- // but there was a desire to have Mallocs at least be a little more representative,
- // while having Mallocs - Frees still represent a live object count.
- // Unfortunately, MemStats doesn't actually export a large allocation count,
- // so it's impossible to pull this number out directly.
- tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
- ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
- ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
-
- ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
- ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
- ms.Lookups = 0 // Already always zero.
- ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
- ms.Alloc = ms.HeapAlloc
- ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
- ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
- ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
- ms.HeapSys = ms.HeapInuse + ms.HeapIdle
- ms.HeapObjects = lookupOrZero(goGCHeapObjects)
- ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
- ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
- ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
- ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
- ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
- ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
- ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
- ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
- ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
- ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
-
- // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
- // and often misleading due to the fact that it's an average over the lifetime
- // of the process.
- // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
- // for more details.
- ms.GCCPUFraction = 0
-}
-
-// batchHistogram is a mutable histogram that is updated
-// in batches.
-type batchHistogram struct {
- selfCollector
-
- // Static fields updated only once.
- desc *Desc
- hasSum bool
-
-	// Because this histogram operates in batches, it just uses a
-	// single mutex for everything. Updates are always serialized,
-	// but Write calls may operate concurrently with updates.
- // Contention between these two sources should be rare.
- mu sync.Mutex
- buckets []float64 // Inclusive lower bounds, like runtime/metrics.
- counts []uint64
- sum float64 // Used if hasSum is true.
-}
-
-// newBatchHistogram creates a new batch histogram value with the given
-// Desc, buckets, and whether or not it has an exact sum available.
-//
-// buckets must always be from the runtime/metrics package, following
-// the same conventions.
-func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
-	// We need to remove -Inf values: runtime/metrics keeps them around,
-	// but a -Inf bucket is not allowed for Prometheus histograms.
- if buckets[0] == math.Inf(-1) {
- buckets = buckets[1:]
- }
- h := &batchHistogram{
- desc: desc,
- buckets: buckets,
- // Because buckets follows runtime/metrics conventions, there's
- // 1 more value in the buckets list than there are buckets represented,
- // because in runtime/metrics, the bucket values represent *boundaries*,
- // and non-Inf boundaries are inclusive lower bounds for that bucket.
- counts: make([]uint64, len(buckets)-1),
- hasSum: hasSum,
- }
- h.init(h)
- return h
-}
-
-// update updates the batchHistogram from a runtime/metrics histogram.
-//
-// sum must be provided if the batchHistogram was created to have an exact sum.
-// h.buckets must be a strict subset of his.Buckets.
-func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
- counts, buckets := his.Counts, his.Buckets
-
- h.mu.Lock()
- defer h.mu.Unlock()
-
- // Clear buckets.
- for i := range h.counts {
- h.counts[i] = 0
- }
- // Copy and reduce buckets.
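-	// h.buckets is a subset of his.Buckets, so several adjacent source
-	// buckets may collapse into a single target bucket; j advances only
-	// when the two upper boundaries coincide.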
- var j int
- for i, count := range counts {
- h.counts[j] += count
- if buckets[i+1] == h.buckets[j+1] {
- j++
- }
- }
- if h.hasSum {
- h.sum = sum
- }
-}
-
-func (h *batchHistogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *batchHistogram) Write(out *dto.Metric) error {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- sum := float64(0)
- if h.hasSum {
- sum = h.sum
- }
- dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
- totalCount := uint64(0)
- for i, count := range h.counts {
- totalCount += count
- if !h.hasSum {
- if count != 0 {
- // N.B. This computed sum is an underestimate.
- sum += h.buckets[i] * float64(count)
- }
- }
-
- // Skip the +Inf bucket, but only for the bucket list.
- // It must still count for sum and totalCount.
- if math.IsInf(h.buckets[i+1], 1) {
- break
- }
- // Float64Histogram's upper bound is exclusive, so make it inclusive
- // by obtaining the next float64 value down, in order.
- upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
- dtoBuckets = append(dtoBuckets, &dto.Bucket{
- CumulativeCount: proto.Uint64(totalCount),
- UpperBound: proto.Float64(upperBound),
- })
- }
- out.Histogram = &dto.Histogram{
- Bucket: dtoBuckets,
- SampleCount: proto.Uint64(totalCount),
- SampleSum: proto.Float64(sum),
- }
- return nil
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
deleted file mode 100644
index 519db348a..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ /dev/null
@@ -1,1837 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// nativeHistogramBounds for the frac (fractional part) of observed values. Only relevant for
-// schema > 0. The position in the slice is the schema. (0 is never used, just
-// here for convenience of using the schema directly as the index.)
-//
-// TODO(beorn7): Currently, we do a binary search into these slices. There are
-// ways to turn it into a small number of simple array lookups. It probably only
-// matters for schema 5 and beyond, but should be investigated. See this comment
-// as a starting point:
-// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
-var nativeHistogramBounds = [][]float64{
-	// Schema 0:
- {0.5},
- // Schema 1:
- {0.5, 0.7071067811865475},
- // Schema 2:
- {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
- // Schema 3:
- {
- 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
- 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
- },
- // Schema 4:
- {
- 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
- 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
- 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
- 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
- },
- // Schema 5:
- {
- 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
- 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
- 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
- 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
- 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
- 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
- 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
- 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
- },
- // Schema 6:
- {
- 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
- 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
- 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
- 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
- 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
- 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
- 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
- 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
- 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
- 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
- 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
- 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
- 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
- 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
- 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
- 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
- },
- // Schema 7:
- {
- 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
- 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
- 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
- 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
- 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
- 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
- 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
- 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
- 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
- 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
- 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
- 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
- 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
- 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
- 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
- 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
- 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
- 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
- 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
- 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
- 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
- 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
- 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
- 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
- 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
- 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
- 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
- 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
- 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
- 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
- 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
- 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
- },
- // Schema 8:
- {
- 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
- 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
- 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
- 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
- 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
- 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
- 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
- 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
- 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
- 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
- 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
- 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
- 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
- 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
- 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
- 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
- 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
- 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
- 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
- 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
- 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
- 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
- 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
- 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
- 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
- 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
- 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
- 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
- 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
- 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
- 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
- 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
- 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
- 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
- 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
- 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
- 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
- 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
- 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
- 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
- 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
- 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
- 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
- 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
- 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
- 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
- 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
- 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
- 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
- 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
- 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
- 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
- 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
- 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
- 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
- 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
- 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
- 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
- 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
- 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
- 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
- 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
- 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
- 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
- },
-}
-
-// The nativeHistogramBounds above can be generated with the code below.
-//
-// TODO(beorn7): It's tempting to actually use `go generate` to generate the
-// code above. However, this could lead to slightly different numbers on
-// different architectures. We still need to decide whether we are fine with
-// that, or if we might prefer to specify precise numbers in the standard.
-//
-// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
-//
-// func init() {
-// // Populate nativeHistogramBounds.
-// numBuckets := 1
-// for i := range nativeHistogramBounds {
-// bounds := []float64{0.5}
-// factor := math.Exp2(math.Exp2(float64(-i)))
-// for j := 0; j < numBuckets-1; j++ {
-// var bound float64
-// if (j+1)%2 == 0 {
-// // Use previously calculated value for increased precision.
-// bound = nativeHistogramBounds[i-1][j/2+1]
-// } else {
-// bound = bounds[j] * factor
-// }
-// bounds = append(bounds, bound)
-// }
-// numBuckets *= 2
-// nativeHistogramBounds[i] = bounds
-// }
-// }
-
-// A Histogram counts individual observations from an event or sample stream in
-// configurable static buckets (or in dynamic sparse buckets as part of the
-// experimental Native Histograms, see below for more details). Similar to a
-// Summary, it also provides a sum of observations and an observation count.
-//
-// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile PromQL function.
-//
-// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
-// (see the documentation for detailed procedures). However, Histograms require
-// the user to pre-define suitable buckets, and they are in general less
-// accurate. (Both problems are addressed by the experimental Native
-// Histograms. To use them, configure a NativeHistogramBucketFactor in the
-// HistogramOpts. They also require a Prometheus server v2.40+ with the
-// corresponding feature flag enabled.)
-//
-// The Observe method of a Histogram has a very low performance overhead in
-// comparison with the Observe method of a Summary.
-//
-// To create Histogram instances, use NewHistogram.
-type Histogram interface {
- Metric
- Collector
-
- // Observe adds a single observation to the histogram. Observations are
- // usually positive or zero. Negative observations are accepted but
- // prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. (The experimental Native
- // Histograms handle negative observations properly.) See
- // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
- // for details.
- Observe(float64)
-}
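-
-// Illustrative sketch (hypothetical metric name and buckets, not part of the
-// vendored file): creating and observing into a classic bucketed Histogram.
-func exampleHistogramUsage() {
-	latency := NewHistogram(HistogramOpts{
-		Name:    "example_request_duration_seconds",
-		Help:    "Request latency distribution.",
-		Buckets: LinearBuckets(0.05, 0.05, 10), // 0.05s to 0.5s.
-	})
-	latency.Observe(0.42)
-}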
-
-// bucketLabel is used for the label that defines the upper bound of a
-// bucket of a histogram ("le" -> "less or equal").
-const bucketLabel = "le"
-
-// DefBuckets are the default Histogram buckets. The default buckets are
-// tailored to broadly measure the response time (in seconds) of a network
-// service. Most likely, however, you will be required to define buckets
-// customized to your use case.
-var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
-
-// DefNativeHistogramZeroThreshold is the default value for
-// NativeHistogramZeroThreshold in the HistogramOpts.
-//
-// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
-// which is a bucket boundary at all possible resolutions.
-const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
-
-// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
-// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
-// bucket that only receives observations of precisely zero.
-const NativeHistogramZeroThresholdZero = -1
-
-var errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
-)
-
-// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
-// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
-// counted and not included in the returned slice. The returned slice is meant
-// to be used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is zero or negative.
-func LinearBuckets(start, width float64, count int) []float64 {
- if count < 1 {
- panic("LinearBuckets needs a positive count")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start += width
- }
- return buckets
-}
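-
-// For example, LinearBuckets(10, 5, 4) returns []float64{10, 15, 20, 25}.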
-
-// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
-// has an upper bound of 'start' and each following bucket's upper bound is
-// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
-// not counted and not included in the returned slice. The returned slice is
-// meant to be used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
-// or if 'factor' is less than or equal to 1.
-func ExponentialBuckets(start, factor float64, count int) []float64 {
- if count < 1 {
- panic("ExponentialBuckets needs a positive count")
- }
- if start <= 0 {
- panic("ExponentialBuckets needs a positive start value")
- }
- if factor <= 1 {
- panic("ExponentialBuckets needs a factor greater than 1")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start *= factor
- }
- return buckets
-}
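-
-// For example, ExponentialBuckets(1, 2, 5) returns []float64{1, 2, 4, 8, 16}.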
-
-// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
-// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
-func ExponentialBucketsRange(min, max float64, count int) []float64 {
- if count < 1 {
-		panic("ExponentialBucketsRange needs a positive count")
- }
- if min <= 0 {
- panic("ExponentialBucketsRange min needs to be greater than 0")
- }
-
- // Formula for exponential buckets.
- // max = min*growthFactor^(bucketCount-1)
-
- // We know max/min and highest bucket. Solve for growthFactor.
- growthFactor := math.Pow(max/min, 1.0/float64(count-1))
-
- // Now that we know growthFactor, solve for each bucket.
- buckets := make([]float64, count)
- for i := 1; i <= count; i++ {
- buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
- }
- return buckets
-}
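-
-// Worked example (illustrative, not part of the upstream file):
-// ExponentialBucketsRange(1, 100, 5) solves 100 = 1 * growthFactor^4, so
-// growthFactor = 100^(1/4) ≈ 3.1623, and the returned upper bounds are
-// approximately
-//
-//	[]float64{1, 3.16, 10, 31.6, 100}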
-
-// HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name to a non-empty string. All other fields are optional
-// and can safely be left at their zero value, although it is strongly
-// encouraged to set a Help string.
-type HistogramOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Histogram (created by joining these components with
-	// "_"). Only Name is mandatory, the others merely help to structure the
- // name. Note that the fully-qualified name of the Histogram must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Histogram.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
- ConstLabels Labels
-
- // Buckets defines the buckets into which observations are counted. Each
- // element in the slice is the upper inclusive bound of a bucket. The
- // values must be sorted in strictly increasing order. There is no need
- // to add a highest bucket with +Inf bound, it will be added
- // implicitly. If Buckets is left as nil or set to a slice of length
- // zero, it is replaced by default buckets. The default buckets are
- // DefBuckets if no buckets for a native histogram (see below) are used,
- // otherwise the default is no buckets. (In other words, if you want to
- // use both regular buckets and buckets for a native histogram, you have
- // to define the regular buckets here explicitly.)
- Buckets []float64
-
- // If NativeHistogramBucketFactor is greater than one, so-called sparse
- // buckets are used (in addition to the regular buckets, if defined
- // above). A Histogram with sparse buckets will be ingested as a Native
- // Histogram by a Prometheus server with that feature enabled (requires
- // Prometheus v2.40+). Sparse buckets are exponential buckets covering
- // the whole float64 range (with the exception of the “zero” bucket, see
- // NativeHistogramZeroThreshold below). From any one bucket to the next,
- // the width of the bucket grows by a constant
- // factor. NativeHistogramBucketFactor provides an upper bound for this
- // factor (exception see below). The smaller
- // NativeHistogramBucketFactor, the more buckets will be used and thus
- // the more costly the histogram will become. A generally good trade-off
- // between cost and accuracy is a value of 1.1 (each bucket is at most
- // 10% wider than the previous one), which will result in each power of
- // two divided into 8 buckets (e.g. there will be 8 buckets between 1
- // and 2, same as between 2 and 4, and 4 and 8, etc.).
- //
- // Details about the actually used factor: The factor is calculated as
- // 2^(2^-n), where n is an integer number between (and including) -4 and
- // 8. n is chosen so that the resulting factor is the largest that is
-	// still smaller than or equal to NativeHistogramBucketFactor. Note that the
- // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
- // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
- // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
- // it is larger than the provided NativeHistogramBucketFactor.
- //
- // NOTE: Native Histograms are still an experimental feature. Their
- // behavior might still change without a major version
- // bump. Subsequently, all NativeHistogram... options here might still
- // change their behavior or name (or might completely disappear) without
- // a major version bump.
- NativeHistogramBucketFactor float64
-	// All observations with an absolute value less than or equal to
-	// NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
- // For best results, this should be close to a bucket boundary. This is
- // usually the case if picking a power of two. If
- // NativeHistogramZeroThreshold is left at zero,
- // DefNativeHistogramZeroThreshold is used as the threshold. To
- // configure a zero bucket with an actual threshold of zero (i.e. only
- // observations of precisely zero will go into the zero bucket), set
- // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
- // constant (or any negative float value).
- NativeHistogramZeroThreshold float64
-
- // The next three fields define a strategy to limit the number of
- // populated sparse buckets. If NativeHistogramMaxBucketNumber is left
- // at zero, the number of buckets is not limited. (Note that this might
- // lead to unbounded memory consumption if the values observed by the
- // Histogram are sufficiently wide-spread. In particular, this could be
- // used as a DoS attack vector. Where the observed values depend on
- // external inputs, it is highly recommended to set a
- // NativeHistogramMaxBucketNumber.) Once the set
- // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
- // enacted:
- // - First, if the last reset (or the creation) of the histogram is at
- // least NativeHistogramMinResetDuration ago, then the whole
- // histogram is reset to its initial state (including regular
- // buckets).
- // - If less time has passed, or if NativeHistogramMinResetDuration is
- // zero, no reset is performed. Instead, the zero threshold is
- // increased sufficiently to reduce the number of buckets to or below
- // NativeHistogramMaxBucketNumber, but not to more than
- // NativeHistogramMaxZeroThreshold. Thus, if
- // NativeHistogramMaxZeroThreshold is already at or below the current
- // zero threshold, nothing happens at this step.
- // - After that, if the number of buckets still exceeds
- // NativeHistogramMaxBucketNumber, the resolution of the histogram is
- // reduced by doubling the width of the sparse buckets (up to a
- // growth factor between one bucket to the next of 2^(2^4) = 65536,
- // see above).
- // - Any increased zero threshold or reduced resolution is reset back
- // to their original values once NativeHistogramMinResetDuration has
- // passed (since the last reset or the creation of the histogram).
- NativeHistogramMaxBucketNumber uint32
- NativeHistogramMinResetDuration time.Duration
- NativeHistogramMaxZeroThreshold float64
-
- // NativeHistogramMaxExemplars limits the number of exemplars
- // that are kept in memory for each native histogram. If you leave it at
-	// zero, a default value of 10 is used. To keep no exemplars specifically
-	// for native histograms, set it to a negative value. (Scrapers can
- // still use the exemplars exposed for classic buckets, which are managed
- // independently.)
- NativeHistogramMaxExemplars int
- // NativeHistogramExemplarTTL is only checked once
- // NativeHistogramMaxExemplars is exceeded. In that case, the
- // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
- // Otherwise, the older exemplar in the pair of exemplars that are closest
- // together (on an exponential scale) is removed.
- // If NativeHistogramExemplarTTL is left at its zero value, a default value of
- // 5m is used. To always delete the oldest exemplar, set it to a negative value.
- NativeHistogramExemplarTTL time.Duration
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-
- // afterFunc is for testing purposes, by default it's time.AfterFunc.
- afterFunc func(time.Duration, func()) *time.Timer
-}
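-
-// A minimal configuration sketch (illustrative, not part of the upstream
-// file; the metric name and bucket choices are hypothetical): a histogram
-// that exposes both classic buckets and a native histogram.
-//
-//	opts := HistogramOpts{
-//		Name: "request_duration_seconds",
-//		Help: "Request latencies in seconds.",
-//		// Explicit classic buckets: with a native histogram configured,
-//		// the DefBuckets default would not be applied.
-//		Buckets:                     DefBuckets,
-//		NativeHistogramBucketFactor: 1.1, // Each bucket at most 10% wider.
-//	}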
-
-// HistogramVecOpts bundles the options to create a HistogramVec metric.
-// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
-// is optional and can safely be left to its default value.
-type HistogramVecOpts struct {
- HistogramOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
-// panics if the buckets in HistogramOpts are not in strictly increasing order.
-//
-// The returned implementation also implements ExemplarObserver. It is safe to
-// perform the corresponding type assertion. Exemplars are tracked separately
-// for each bucket.
-func NewHistogram(opts HistogramOpts) Histogram {
- return newHistogram(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
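-
-// Usage sketch (illustrative, not part of the upstream file): the returned
-// Histogram can be asserted to ExemplarObserver, as documented above. The
-// metric name and exemplar label are hypothetical.
-//
-//	h := NewHistogram(HistogramOpts{Name: "example_seconds"})
-//	h.Observe(0.42)
-//	h.(ExemplarObserver).ObserveWithExemplar(0.42, Labels{"trace_id": "abc123"})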
-
-func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
- if len(desc.variableLabels.names) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
- }
-
- for _, n := range desc.variableLabels.names {
- if n == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
-
- if opts.now == nil {
- opts.now = time.Now
- }
- if opts.afterFunc == nil {
- opts.afterFunc = time.AfterFunc
- }
-
- h := &histogram{
- desc: desc,
- upperBounds: opts.Buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
- nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
- nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
- lastResetTime: opts.now(),
- now: opts.now,
- afterFunc: opts.afterFunc,
- }
- if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
- h.upperBounds = DefBuckets
- }
- if opts.NativeHistogramBucketFactor <= 1 {
- h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
- } else {
- switch {
- case opts.NativeHistogramZeroThreshold > 0:
- h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
- case opts.NativeHistogramZeroThreshold == 0:
- h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
- } // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
- h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
- h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
- }
- for i, upperBound := range h.upperBounds {
- if i < len(h.upperBounds)-1 {
- if upperBound >= h.upperBounds[i+1] {
- panic(fmt.Errorf(
- "histogram buckets must be in increasing order: %f >= %f",
- upperBound, h.upperBounds[i+1],
- ))
- }
- } else {
- if math.IsInf(upperBound, +1) {
- // The +Inf bucket is implicit. Remove it here.
- h.upperBounds = h.upperBounds[:i]
- }
- }
- }
-	// Now we know the final length of h.upperBounds and can create the
-	// buckets for both counts as well as the exemplars:
- h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
- atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
- atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
- h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
- atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
- atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
- h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
-
- h.init(h) // Init self-collection.
- return h
-}
-
-type histogramCounts struct {
- // Order in this struct matters for the alignment required by atomic
- // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
-
- // sumBits contains the bits of the float64 representing the sum of all
- // observations.
- sumBits uint64
- count uint64
-
-	// nativeHistogramZeroBucket counts all (positive and negative)
-	// observations in the zero bucket (with an absolute value less than or
-	// equal to the current threshold, see next field).
- nativeHistogramZeroBucket uint64
- // nativeHistogramZeroThresholdBits is the bit pattern of the current
- // threshold for the zero bucket. It's initially equal to
- // nativeHistogramZeroThreshold but may change according to the bucket
- // count limitation strategy.
- nativeHistogramZeroThresholdBits uint64
- // nativeHistogramSchema may change over time according to the bucket
- // count limitation strategy and therefore has to be saved here.
- nativeHistogramSchema int32
- // Number of (positive and negative) sparse buckets.
- nativeHistogramBucketsNumber uint32
-
- // Regular buckets.
- buckets []uint64
-
- // The sparse buckets for native histograms are implemented with a
- // sync.Map for now. A dedicated data structure will likely be more
- // efficient. There are separate maps for negative and positive
- // observations. The map's value is an *int64, counting observations in
- // that bucket. (Note that we don't use uint64 as an int64 won't
- // overflow in practice, and working with signed numbers from the
- // beginning simplifies the handling of deltas.) The map's key is the
- // index of the bucket according to the used
- // nativeHistogramSchema. Index 0 is for an upper bound of 1.
- nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
-}
-
-// observe manages the parts of observe that only affect
-// histogramCounts. doSparse is true if the sparse buckets should be
-// updated, too.
-func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
- if bucket < len(hc.buckets) {
- atomic.AddUint64(&hc.buckets[bucket], 1)
- }
- atomicAddFloat(&hc.sumBits, v)
- if doSparse && !math.IsNaN(v) {
- var (
- key int
- schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
- zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
- bucketCreated, isInf bool
- )
- if math.IsInf(v, 0) {
- // Pretend v is MaxFloat64 but later increment key by one.
- if math.IsInf(v, +1) {
- v = math.MaxFloat64
- } else {
- v = -math.MaxFloat64
- }
- isInf = true
- }
- frac, exp := math.Frexp(math.Abs(v))
- if schema > 0 {
- bounds := nativeHistogramBounds[schema]
- key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
- } else {
- key = exp
- if frac == 0.5 {
- key--
- }
- offset := (1 << -schema) - 1
- key = (key + offset) >> -schema
- }
- if isInf {
- key++
- }
- switch {
- case v > zeroThreshold:
- bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
- case v < -zeroThreshold:
- bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
- default:
- atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
- }
- if bucketCreated {
- atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
- }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hc.count, 1)
-}
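-
-// Worked example for the schema <= 0 branch above (illustrative, not part
-// of the upstream file): for v = 5 and schema = 0, math.Frexp(5) yields
-// frac = 0.625 and exp = 3, so key = 3, i.e. the sparse bucket with upper
-// bound 2^3 = 8 (5 falls into the interval (4, 8]).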
-
-type histogram struct {
-	// countAndHotIdx enables lock-free writes with use of atomic updates.
-	// The most significant bit is the hot index [0 or 1] of the counts array
-	// below. Observe calls update the hot one. All remaining bits count the
-	// number of Observe calls. Observe starts by incrementing this counter,
-	// and finishes by incrementing the count field in the respective
-	// histogramCounts, as a marker for completion.
-	//
-	// Calls of the Write method (which are non-mutating reads from the
-	// perspective of the histogram) swap hot and cold under the writeMtx
-	// lock. A cooldown is awaited (while locked) by comparing the number of
-	// observations with the initiation count. Once they match, the last
-	// observation on the now cool one has completed. All cold fields must
-	// be merged into the new hot before releasing writeMtx.
- //
- // Fields with atomic access first! See alignment constraint:
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- countAndHotIdx uint64
-
- selfCollector
- desc *Desc
-
- // Only used in the Write method and for sparse bucket management.
- mtx sync.Mutex
-
- // Two counts, one is "hot" for lock-free observations, the other is
- // "cold" for writing out a dto.Metric. It has to be an array of
- // pointers to guarantee 64bit alignment of the histogramCounts, see
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- counts [2]*histogramCounts
-
- upperBounds []float64
- labelPairs []*dto.LabelPair
- exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
- nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
- nativeHistogramZeroThreshold float64 // The initial zero threshold.
- nativeHistogramMaxZeroThreshold float64
- nativeHistogramMaxBuckets uint32
- nativeHistogramMinResetDuration time.Duration
- // lastResetTime is protected by mtx. It is also used as created timestamp.
- lastResetTime time.Time
- // resetScheduled is protected by mtx. It is true if a reset is
- // scheduled for a later time (when nativeHistogramMinResetDuration has
- // passed).
- resetScheduled bool
- nativeExemplars nativeExemplars
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-
- // afterFunc is for testing purposes, by default it's time.AfterFunc.
- afterFunc func(time.Duration, func()) *time.Timer
-}
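-
-// Decoding countAndHotIdx (illustrative sketch, not part of the upstream
-// file), as done in Write and limitBuckets below:
-//
-//	n := atomic.LoadUint64(&h.countAndHotIdx)
-//	hotIdx := n >> 63            // Most significant bit: which counts struct is hot.
-//	count := n & ((1 << 63) - 1) // Lower 63 bits: number of observations started.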
-
-func (h *histogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *histogram) Observe(v float64) {
- h.observe(v, h.findBucket(v))
-}
-
-// ObserveWithExemplar should not be called in a high-frequency setting
-// for a native histogram with configured exemplars. For this case,
-// the implementation isn't lock-free and might suffer from lock contention.
-func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
- i := h.findBucket(v)
- h.observe(v, i)
- h.updateExemplar(v, i, e)
-}
-
-func (h *histogram) Write(out *dto.Metric) error {
- // For simplicity, we protect this whole method by a mutex. It is not in
- // the hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it, if possible at
- // all.
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
- // without touching the count bits. See the struct comments for a full
- // description of the algorithm.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
- // count is contained unchanged in the lower 63 bits.
- count := n & ((1 << 63) - 1)
- // The most significant bit tells us which counts is hot. The complement
- // is thus the cold one.
- hotCounts := h.counts[n>>63]
- coldCounts := h.counts[(^n)>>63]
-
- waitForCooldown(count, coldCounts)
-
- his := &dto.Histogram{
- Bucket: make([]*dto.Bucket, len(h.upperBounds)),
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
- CreatedTimestamp: timestamppb.New(h.lastResetTime),
- }
- out.Histogram = his
- out.Label = h.labelPairs
-
- var cumCount uint64
- for i, upperBound := range h.upperBounds {
- cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
- his.Bucket[i] = &dto.Bucket{
- CumulativeCount: proto.Uint64(cumCount),
- UpperBound: proto.Float64(upperBound),
- }
- if e := h.exemplars[i].Load(); e != nil {
- his.Bucket[i].Exemplar = e.(*dto.Exemplar)
- }
- }
- // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
- if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
- b := &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(math.Inf(1)),
- Exemplar: e.(*dto.Exemplar),
- }
- his.Bucket = append(his.Bucket, b)
- }
- if h.nativeHistogramSchema > math.MinInt32 {
- his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
- his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
- zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
-
- defer func() {
- coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
- coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
- }()
-
- his.ZeroCount = proto.Uint64(zeroBucket)
- his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
- his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
-
- // Add a no-op span to a histogram without observations and with
- // a zero threshold of zero. Otherwise, a native histogram would
- // look like a classic histogram to scrapers.
- if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
- his.PositiveSpan = []*dto.BucketSpan{{
- Offset: proto.Int32(0),
- Length: proto.Uint32(0),
- }}
- }
-
- if h.nativeExemplars.isEnabled() {
- h.nativeExemplars.Lock()
- his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
- h.nativeExemplars.Unlock()
- }
-
- }
- addAndResetCounts(hotCounts, coldCounts)
- return nil
-}
-
-// findBucket returns the index of the bucket for the provided value, or
-// len(h.upperBounds) for the +Inf bucket.
-func (h *histogram) findBucket(v float64) int {
- // TODO(beorn7): For small numbers of buckets (<30), a linear search is
- // slightly faster than the binary search. If we really care, we could
- // switch from one search strategy to the other depending on the number
- // of buckets.
- //
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
-	//  11 buckets: linear 38.3 ns/op - binary 48.7 ns/op
-	// 100 buckets: linear 78.1 ns/op - binary 54.9 ns/op
-	// 300 buckets: linear  154 ns/op - binary 61.6 ns/op
- return sort.SearchFloat64s(h.upperBounds, v)
-}
-
-// observe is the implementation for Observe without the findBucket part.
-func (h *histogram) observe(v float64, bucket int) {
- // Do not add to sparse buckets for NaN observations.
- doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
- // We increment h.countAndHotIdx so that the counter in the lower
- // 63 bits gets incremented. At the same time, we get the new value
- // back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1)
- hotCounts := h.counts[n>>63]
- hotCounts.observe(v, bucket, doSparse)
- if doSparse {
- h.limitBuckets(hotCounts, v, bucket)
- }
-}
-
-// limitBuckets applies a strategy to limit the number of populated sparse
-// buckets. It's generally best effort, and there are situations where the
-// number can go higher (if even the lowest resolution isn't enough to reduce
-// the number sufficiently, or if the provided counts aren't fully updated yet
-// by a concurrently happening Write call).
-func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
- if h.nativeHistogramMaxBuckets == 0 {
- return // No limit configured.
- }
- if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
- return // Bucket limit not exceeded yet.
- }
-
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- // The hot counts might have been swapped just before we acquired the
- // lock. Re-fetch the hot counts first...
- n := atomic.LoadUint64(&h.countAndHotIdx)
- hotIdx := n >> 63
- coldIdx := (^n) >> 63
- hotCounts := h.counts[hotIdx]
- coldCounts := h.counts[coldIdx]
- // ...and then check again if we really have to reduce the bucket count.
- if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
- return // Bucket limit not exceeded after all.
- }
- // Try the various strategies in order.
- if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
- return
- }
- // One of the other strategies will happen. To undo what they will do as
- // soon as enough time has passed to satisfy
- // h.nativeHistogramMinResetDuration, schedule a reset at the right time
- // if we haven't done so already.
- if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
- h.resetScheduled = true
- h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
- }
-
- if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
- return
- }
- h.doubleBucketWidth(hotCounts, coldCounts)
-}
-
-// maybeReset resets the whole histogram if at least
-// h.nativeHistogramMinResetDuration has passed. It returns true if the
-// histogram has been reset. The caller must have locked h.mtx.
-func (h *histogram) maybeReset(
- hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
-) bool {
- // We are using the possibly mocked h.now() rather than
- // time.Since(h.lastResetTime) to enable testing.
- if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
-		h.resetScheduled || // Do not interfere if a reset is already scheduled.
- h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
- return false
- }
- // Completely reset coldCounts.
- h.resetCounts(cold)
- // Repeat the latest observation to not lose it completely.
- cold.observe(value, bucket, true)
- // Make coldCounts the new hot counts while resetting countAndHotIdx.
- n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
- count := n & ((1 << 63) - 1)
- waitForCooldown(count, hot)
- // Finally, reset the formerly hot counts, too.
- h.resetCounts(hot)
- h.lastResetTime = h.now()
- return true
-}
-
-// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
-// called without having locked h.mtx.
-func (h *histogram) reset() {
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- n := atomic.LoadUint64(&h.countAndHotIdx)
- hotIdx := n >> 63
- coldIdx := (^n) >> 63
- hot := h.counts[hotIdx]
- cold := h.counts[coldIdx]
- // Completely reset coldCounts.
- h.resetCounts(cold)
- // Make coldCounts the new hot counts while resetting countAndHotIdx.
- n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
- count := n & ((1 << 63) - 1)
- waitForCooldown(count, hot)
- // Finally, reset the formerly hot counts, too.
- h.resetCounts(hot)
- h.lastResetTime = h.now()
- h.resetScheduled = false
-}
-
-// maybeWidenZeroBucket widens the zero bucket until it includes the existing
-// buckets closest to the zero bucket (which could be two, if an equidistant
-// negative and a positive bucket exists, but usually it's only one bucket to be
-// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
-// limits how far the zero bucket can be extended, and if that's not enough to
-// include an existing bucket, the method returns false. The caller must have
-// locked h.mtx.
-func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
- currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
- if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
- return false
- }
- // Find the key of the bucket closest to zero.
- smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
- smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
- if smallestNegativeKey < smallestKey {
- smallestKey = smallestNegativeKey
- }
- if smallestKey == math.MaxInt32 {
- return false
- }
- newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
- if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
- return false // New threshold would exceed the max threshold.
- }
- atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
- // Remove applicable buckets.
- if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
- atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- }
- if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
- atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- }
- // Make cold counts the new hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
- count := n & ((1 << 63) - 1)
- // Swap the pointer names to represent the new roles and make
- // the rest less confusing.
- hot, cold = cold, hot
- waitForCooldown(count, cold)
- // Add all the now cold counts to the new hot counts...
- addAndResetCounts(hot, cold)
- // ...adjust the new zero threshold in the cold counts, too...
- atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
- // ...and then merge the newly deleted buckets into the wider zero
- // bucket.
- mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
- return func(k, v interface{}) bool {
- key := k.(int)
- bucket := v.(*int64)
- if key == smallestKey {
- // Merge into hot zero bucket...
- atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
- // ...and delete from cold counts.
- coldBuckets.Delete(key)
- atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- } else {
- // Add to corresponding hot bucket...
- if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
- atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
- }
- // ...and reset cold bucket.
- atomic.StoreInt64(bucket, 0)
- }
- return true
- }
- }
-
- cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
- cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
- return true
-}
-
-// doubleBucketWidth doubles the bucket width (by decrementing the schema
-// number). Note that very sparse buckets could lead to a low reduction of the
-// bucket count (or even no reduction at all). The method does nothing if the
-// schema is already -4.
-func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
- coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
- if coldSchema == -4 {
- return // Already at lowest resolution.
- }
- coldSchema--
- atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
- // Play it simple and just delete all cold buckets.
- atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
- deleteSyncMap(&cold.nativeHistogramBucketsNegative)
- deleteSyncMap(&cold.nativeHistogramBucketsPositive)
- // Make coldCounts the new hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
- count := n & ((1 << 63) - 1)
- // Swap the pointer names to represent the new roles and make
- // the rest less confusing.
- hot, cold = cold, hot
- waitForCooldown(count, cold)
- // Add all the now cold counts to the new hot counts...
- addAndResetCounts(hot, cold)
- // ...adjust the schema in the cold counts, too...
- atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
- // ...and then merge the cold buckets into the wider hot buckets.
- merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
- return func(k, v interface{}) bool {
- key := k.(int)
- bucket := v.(*int64)
- // Adjust key to match the bucket to merge into.
- if key > 0 {
- key++
- }
- key /= 2
- // Add to corresponding hot bucket.
- if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
- atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
- }
- return true
- }
- }
-
- cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
- cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
- // Play it simple again and just delete all cold buckets.
- atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
- deleteSyncMap(&cold.nativeHistogramBucketsNegative)
- deleteSyncMap(&cold.nativeHistogramBucketsPositive)
-}
-
-func (h *histogram) resetCounts(counts *histogramCounts) {
- atomic.StoreUint64(&counts.sumBits, 0)
- atomic.StoreUint64(&counts.count, 0)
- atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
- atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
- atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
- atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
- for i := range h.upperBounds {
- atomic.StoreUint64(&counts.buckets[i], 0)
- }
- deleteSyncMap(&counts.nativeHistogramBucketsNegative)
- deleteSyncMap(&counts.nativeHistogramBucketsPositive)
-}
-
-// updateExemplar replaces the exemplar for the provided classic bucket.
-// With nil labels, it's a no-op. It panics if any of the labels is invalid.
-// If the histogram is a native histogram, the exemplar is also added to
-// nativeExemplars, which enforces a limit and evicts one exemplar once the
-// limit is reached.
-func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
- if l == nil {
- return
- }
- e, err := newExemplar(v, h.now(), l)
- if err != nil {
- panic(err)
- }
- h.exemplars[bucket].Store(e)
- doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
- if doSparse {
- h.nativeExemplars.addExemplar(e)
- }
-}
-
-// HistogramVec is a Collector that bundles a set of Histograms that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewHistogramVec.
-type HistogramVec struct {
- *MetricVec
-}
-
-// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names.
-func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- return V2.NewHistogramVec(HistogramVecOpts{
- HistogramOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
-
-// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
-func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- return &HistogramVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts.HistogramOpts, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Histogram for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Histogram is created.
-//
-// It is possible to call this method without using the returned Histogram to only
-// create the new Histogram but leave it at its starting value, a Histogram without
-// any observations.
-//
-// Keeping the Histogram for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
-// Histogram will still exist, but it will not be exported anymore, even if a
-// Histogram with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Histogram for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Histogram is created. Implications of
-// creating a Histogram without using it and keeping the Histogram for later use
-// are the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
- h, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return h
-}
-
-// With works as GetMetricWith but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *HistogramVec) With(labels Labels) Observer {
- h, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return h
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the HistogramVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &HistogramVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
-
-type constHistogram struct {
- desc *Desc
- count uint64
- sum float64
- buckets map[float64]uint64
- labelPairs []*dto.LabelPair
- createdTs *timestamppb.Timestamp
-}
-
-func (h *constHistogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *constHistogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{
- CreatedTimestamp: h.createdTs,
- }
-
- buckets := make([]*dto.Bucket, 0, len(h.buckets))
-
- his.SampleCount = proto.Uint64(h.count)
- his.SampleSum = proto.Float64(h.sum)
- for upperBound, count := range h.buckets {
- buckets = append(buckets, &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(upperBound),
- })
- }
-
- if len(buckets) > 0 {
- sort.Sort(buckSort(buckets))
- }
- his.Bucket = buckets
-
- out.Histogram = his
- out.Label = h.labelPairs
-
- return nil
-}
-
-// NewConstHistogram returns a metric representing a Prometheus histogram with
-// fixed values for the count, sum, and bucket counts. As those parameters
-// cannot be changed, the returned value does not implement the Histogram
-// interface (but only the Metric interface). Users of this package will not
-// have much use for it in regular operations. However, when implementing custom
-// Collectors, it is useful as a throw-away metric that is generated on the fly
-// to send it to Prometheus in the Collect method.
-//
-// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
-//
-// NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- return &constHistogram{
- desc: desc,
- count: count,
- sum: sum,
- buckets: buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstHistogram is a version of NewConstHistogram that panics where
-// NewConstHistogram would have returned an error.
-func MustNewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) Metric {
- m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
-func NewConstHistogramWithCreatedTimestamp(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- ct time.Time,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- return &constHistogram{
- desc: desc,
- count: count,
- sum: sum,
- buckets: buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- createdTs: timestamppb.New(ct),
- }, nil
-}
-
-// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
-// NewConstHistogramWithCreatedTimestamp would have returned an error.
-func MustNewConstHistogramWithCreatedTimestamp(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- ct time.Time,
- labelValues ...string,
-) Metric {
- m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-type buckSort []*dto.Bucket
-
-func (s buckSort) Len() int {
- return len(s)
-}
-
-func (s buckSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s buckSort) Less(i, j int) bool {
- return s[i].GetUpperBound() < s[j].GetUpperBound()
-}
-
-// pickSchema returns the smallest number n between -4 and 8 such that
-// 2^(2^-n) is less than or equal to the provided bucketFactor, i.e. the
-// factor 2^(2^-n) is the largest standard factor not exceeding bucketFactor.
-//
-// Special cases:
-// - bucketFactor <= 1: panics.
-// - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
-func pickSchema(bucketFactor float64) int32 {
- if bucketFactor <= 1 {
- panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
- }
- floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
- switch {
- case floor <= -8:
- return 8
- case floor >= 4:
- return -4
- default:
- return -int32(floor)
- }
-}
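-
-// Worked example (illustrative, not part of the upstream file): for
-// bucketFactor = 1.1, log2(log2(1.1)) ≈ -2.86, so floor = -3 and the
-// returned schema is 3. The factor actually used is then
-// 2^(2^-3) ≈ 1.0905, the largest standard factor not exceeding 1.1.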
-
-func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
- var ii []int
- buckets.Range(func(k, v interface{}) bool {
- ii = append(ii, k.(int))
- return true
- })
- sort.Ints(ii)
-
- if len(ii) == 0 {
- return nil, nil
- }
-
- var (
- spans []*dto.BucketSpan
- deltas []int64
- prevCount int64
- nextI int
- )
-
- appendDelta := func(count int64) {
- *spans[len(spans)-1].Length++
- deltas = append(deltas, count-prevCount)
- prevCount = count
- }
-
- for n, i := range ii {
- v, _ := buckets.Load(i)
- count := atomic.LoadInt64(v.(*int64))
- // Multiple spans with only small gaps in between are probably
- // encoded more efficiently as one larger span with a few empty
- // buckets. Needs some research to find the sweet spot. For now,
- // we assume that gaps of one or two buckets should not create
- // a new span.
- iDelta := int32(i - nextI)
- if n == 0 || iDelta > 2 {
- // We have to create a new span, either because we are
- // at the very beginning, or because we have found a gap
- // of more than two buckets.
- spans = append(spans, &dto.BucketSpan{
- Offset: proto.Int32(iDelta),
- Length: proto.Uint32(0),
- })
- } else {
- // We have found a small gap (or no gap at all).
- // Insert empty buckets as needed.
- for j := int32(0); j < iDelta; j++ {
- appendDelta(0)
- }
- }
- appendDelta(count)
- nextI = i + 1
- }
- return spans, deltas
-}
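-
-// Worked example (illustrative, not part of the upstream file): sparse
-// buckets at indices 1, 2, and 5 with counts 3, 2, and 7. The gap between
-// index 2 and index 5 is only two buckets, so no new span is started;
-// instead, two zero-count buckets are inserted:
-//
-//	spans:  [{Offset: 1, Length: 5}]
-//	deltas: [3, -1, -2, 0, 7] // Counts 3, 2, 0, 0, 7 as successive differences.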
-
-// addToBucket increments the sparse bucket at key by the provided amount. It
-// returns true if a new sparse bucket had to be created for that.
-func addToBucket(buckets *sync.Map, key int, increment int64) bool {
- if existingBucket, ok := buckets.Load(key); ok {
- // Fast path without allocation.
- atomic.AddInt64(existingBucket.(*int64), increment)
- return false
- }
- // Bucket doesn't exist yet. Slow path allocating new counter.
- newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
- if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
- // The bucket was created concurrently in another goroutine.
- // Have to increment after all.
- atomic.AddInt64(actualBucket.(*int64), increment)
- return false
- }
- return true
-}
-
-// addAndReset returns a function to be used with sync.Map.Range of sparse
-// buckets in coldCounts. It increments the buckets in the provided hotBuckets
-// according to the buckets ranged through. It then resets all buckets ranged
-// through to 0 (but leaves them in place so that they don't need to get
-// recreated on the next scrape).
-func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
- return func(k, v interface{}) bool {
- bucket := v.(*int64)
- if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
- atomic.AddUint32(bucketNumber, 1)
- }
- atomic.StoreInt64(bucket, 0)
- return true
- }
-}
-
-func deleteSyncMap(m *sync.Map) {
- m.Range(func(k, v interface{}) bool {
- m.Delete(k)
- return true
- })
-}
-
-func findSmallestKey(m *sync.Map) int {
- result := math.MaxInt32
- m.Range(func(k, v interface{}) bool {
- key := k.(int)
- if key < result {
- result = key
- }
- return true
- })
- return result
-}
-
-func getLe(key int, schema int32) float64 {
- // Here a bit of context about the behavior for the last bucket counting
- // regular numbers (called simply "last bucket" below) and the bucket
- // counting observations of ±Inf (called "inf bucket" below, with a key
- // one higher than that of the "last bucket"):
- //
- // If we apply the usual formula to the last bucket, its upper bound
- // would be calculated as +Inf. The reason is that the max possible
- // regular float64 number (math.MaxFloat64) doesn't coincide with one of
- // the calculated bucket boundaries. So the calculated boundary has to
- // be larger than math.MaxFloat64, and the only float64 larger than
- // math.MaxFloat64 is +Inf. However, we want to count actual
- // observations of ±Inf in the inf bucket. Therefore, we have to treat
- // the upper bound of the last bucket specially and set it to
- // math.MaxFloat64. (The upper bound of the inf bucket, with its key
- // being one higher than that of the last bucket, naturally comes out as
- // +Inf by the usual formula. So that's fine.)
- //
- // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
- // 1024. If there were a float64 number following math.MaxFloat64, it
- // would have a frac of 1.0 and an exp of 1024, or equivalently a frac
- // of 0.5 and an exp of 1025. However, since frac must be smaller than
- // 1, and exp must be smaller than 1025, either representation overflows
- // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
- // largest possible float64. Q.E.D.) However, the formula for
- // calculating the upper bound from the idx and schema of the last
- // bucket results in precisely that. It is either frac=1.0 & exp=1024
- // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
- // by the way, a power of two where the exponent itself is a power of
-	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
- // schemas.) So these are the special cases we have to catch below.
- if schema < 0 {
- exp := key << -schema
- if exp == 1024 {
- // This is the last bucket before the overflow bucket
- // (for ±Inf observations). Return math.MaxFloat64 as
- // explained above.
- return math.MaxFloat64
- }
- return math.Ldexp(1, exp)
- }
-
- fracIdx := key & ((1 << schema) - 1)
- frac := nativeHistogramBounds[schema][fracIdx]
- exp := (key >> schema) + 1
- if frac == 0.5 && exp == 1025 {
- // This is the last bucket before the overflow bucket (for ±Inf
- // observations). Return math.MaxFloat64 as explained above.
- return math.MaxFloat64
- }
- return math.Ldexp(frac, exp)
-}
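-
-// Example (illustrative, not part of the upstream file): at schema -1,
-// buckets grow by a factor of 2^(2^1) = 4 per bucket, so getLe(0, -1)
-// returns 1 and getLe(1, -1) returns 4, i.e. bucket 1 covers (1, 4].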
-
-// waitForCooldown returns after the count field in the provided histogramCounts
-// has reached the provided count value.
-func waitForCooldown(count uint64, counts *histogramCounts) {
- for count != atomic.LoadUint64(&counts.count) {
- runtime.Gosched() // Let observations get work done.
- }
-}
-
-// atomicAddFloat adds the provided float atomically to another float
-// represented by the bit pattern the bits pointer is pointing to.
-func atomicAddFloat(bits *uint64, v float64) {
- for {
- loadedBits := atomic.LoadUint64(bits)
- newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
- if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
- break
- }
- }
-}
-
-// atomicDecUint32 atomically decrements the uint32 p points to. See
-// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
-func atomicDecUint32(p *uint32) {
- atomic.AddUint32(p, ^uint32(0))
-}
-
-// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
-// bucket) from the cold counts to the corresponding fields in the hot
-// counts. Those fields are then reset to 0 in the cold counts.
-func addAndResetCounts(hot, cold *histogramCounts) {
- atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
- atomic.StoreUint64(&cold.count, 0)
- coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
- atomicAddFloat(&hot.sumBits, coldSum)
- atomic.StoreUint64(&cold.sumBits, 0)
- for i := range hot.buckets {
- atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
- atomic.StoreUint64(&cold.buckets[i], 0)
- }
- atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
- atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
-}
-
-type nativeExemplars struct {
- sync.Mutex
-
-	// ttl is the time-to-live for exemplars. It is set to -1 if exemplars
-	// are disabled, i.e. if NativeHistogramMaxExemplars is negative. On
-	// insertion, ttl is used to evict an exemplar that is older than ttl,
-	// if one exists.
- ttl time.Duration
-
- exemplars []*dto.Exemplar
-}
-
-func (n *nativeExemplars) isEnabled() bool {
- return n.ttl != -1
-}
-
-func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
- if ttl == 0 {
- ttl = 5 * time.Minute
- }
-
- if maxCount == 0 {
- maxCount = 10
- }
-
- if maxCount < 0 {
- maxCount = 0
- ttl = -1
- }
-
- return nativeExemplars{
- ttl: ttl,
- exemplars: make([]*dto.Exemplar, 0, maxCount),
- }
-}
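-
-// Behavior sketch (illustrative, not part of the upstream file):
-//
-//	makeNativeExemplars(0, 0)           // Defaults: ttl 5m, capacity 10.
-//	makeNativeExemplars(time.Minute, 5) // ttl 1m, capacity 5.
-//	makeNativeExemplars(0, -1)          // Disabled: ttl -1, capacity 0.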
-
-func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
- if !n.isEnabled() {
- return
- }
-
- n.Lock()
- defer n.Unlock()
-
-	// As long as the number of exemplars is still below cap(n.exemplars),
-	// insert the new exemplar directly at its position in the
-	// order sorted by value.
- if len(n.exemplars) < cap(n.exemplars) {
- var nIdx int
- for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
- if *e.Value < *n.exemplars[nIdx].Value {
- break
- }
- }
- n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
- return
- }
-
- if len(n.exemplars) == 1 {
-		// When the number of exemplars is 1,
-		// replace the existing exemplar with the new one.
- n.exemplars[0] = e
- return
- }
- // From this point on, the number of exemplars is greater than 1.
-
- // When the number of exemplars exceeds the limit, remove one exemplar.
- var (
- ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
- otIdx = -1 // Index of the exemplar with the oldest timestamp.
-
- md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
-
- // The insertion point of the new exemplar in the exemplars slice after insertion.
- // This is calculated purely based on the order of the exemplars by value.
- // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
- nIdx = -1
-
- // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
- // The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
- // It is calculated in 3 steps:
- // 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
- // That is the following will be true (on log scale):
- // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
- // the closest values to each other from all pairs.
- // For example, suppose the values are distributed like this:
- // |-----------x-------------x----------------x----x-----|
- // ^--rIdx as this is older.
- // Or like this:
- // |-----------x-------------x----------------x----x-----|
- // ^--rIdx as this is older.
-	// 2. If there is an exemplar that expired, then we simply reset rIdx to that index.
- // 3. We check if by inserting the new exemplar we would create a closer pair at
- // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
- // keep the spread of exemplars by value; otherwise we keep rIdx as it is.
- rIdx = -1
- cLog float64 // Logarithm of the current exemplar.
- pLog float64 // Logarithm of the previous exemplar.
- )
-
- for i, exemplar := range n.exemplars {
- // Find the exemplar with the oldest timestamp.
- if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
- ot = exemplar.Timestamp.AsTime()
- otIdx = i
- }
-
-		// Find the index at which to insert the new exemplar.
- if nIdx == -1 && *e.Value <= *exemplar.Value {
- nIdx = i
- }
-
-		// Find the two closest exemplars and pick the one with the older timestamp.
- pLog = cLog
- cLog = math.Log(exemplar.GetValue())
- if i == 0 {
- continue
- }
- diff := math.Abs(cLog - pLog)
- if md == -1 || diff < md {
- // The closest exemplar pair is at index: i-1, i.
- // Choose the exemplar with the older timestamp for replacement.
- md = diff
- if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
- rIdx = i
- } else {
- rIdx = i - 1
- }
- }
-
- }
-
-	// If all existing exemplars are smaller than the new exemplar,
-	// then the new exemplar is inserted at the end.
- if nIdx == -1 {
- nIdx = len(n.exemplars)
- }
- // Here, we have the following relationships:
- // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
- // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
-
- if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
- // If the oldest exemplar has expired, then replace it with the new exemplar.
- rIdx = otIdx
- } else {
- // In the previous for loop, when calculating the closest pair of exemplars,
- // we did not take into account the newly inserted exemplar.
-		// So we need to redo the calculation with the new exemplar included.
- elog := math.Log(e.GetValue())
- if nIdx > 0 {
- diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
- if diff < md {
- // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
- // v--rIdx
- // |-----------x-n-----------x----------------x----x-----|
- // nIdx-1--^ ^--new exemplar value
- // Do not make the spread worse, replace nIdx-1 and not rIdx.
- md = diff
- rIdx = nIdx - 1
- }
- }
- if nIdx < len(n.exemplars) {
- diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
- if diff < md {
- // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
-				//                                             v--rIdx
-				// |-----------x-----------n-x----------------x----x-----|
-				//      new exemplar value--^ ^--nIdx
-				// Do not make the spread worse, replace nIdx and not rIdx.
- rIdx = nIdx
- }
- }
- }
-
- // Adjust the slice according to rIdx and nIdx.
- switch {
- case rIdx == nIdx:
- n.exemplars[nIdx] = e
- case rIdx < nIdx:
- n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
- case rIdx > nIdx:
- n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
- }
-}
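The replacement policy deleted above is easier to see in isolation. Below is a minimal, self-contained sketch (the helper name is hypothetical, not part of the vendored code) of the "closest pair on a log scale" selection that drives the md/rIdx bookkeeping:

package main

import (
	"fmt"
	"math"
)

// closestPairIdx returns i such that (vals[i], vals[i+1]) is the closest pair
// on a log scale, mirroring the md/rIdx bookkeeping in the loop above.
func closestPairIdx(vals []float64) int {
	best, bestDiff := -1, math.Inf(1)
	for i := 1; i < len(vals); i++ {
		diff := math.Abs(math.Log(vals[i]) - math.Log(vals[i-1]))
		if diff < bestDiff {
			best, bestDiff = i-1, diff
		}
	}
	return best
}

func main() {
	// 90 and 100 are the closest pair on a log scale, so index 2 is printed;
	// the algorithm above would then replace the older of those two exemplars.
	fmt.Println(closestPairIdx([]float64{1, 10, 90, 100, 1000})) // 2
}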
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
deleted file mode 100644
index 1ed5abe74..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2015 Björn Rabenstein
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-//
-// The code in this package is copied from the original repo to avoid a
-// dependency. Hence this file carries the copyright of the original repo.
-// https://github.com/beorn7/floats
-package internal
-
-import (
- "math"
-)
-
-// minNormalFloat64 is the smallest positive normal value of type float64.
-var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
-
-// AlmostEqualFloat64 returns true if a and b are equal within a relative error
-// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
-// details of the applied method.
-func AlmostEqualFloat64(a, b, epsilon float64) bool {
- if a == b {
- return true
- }
- absA := math.Abs(a)
- absB := math.Abs(b)
- diff := math.Abs(a - b)
- if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
- return diff < epsilon*minNormalFloat64
- }
- return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
-}
-
-// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
-func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
- if len(a) != len(b) {
- return false
- }
- for i := range a {
- if !AlmostEqualFloat64(a[i], b[i], epsilon) {
- return false
- }
- }
- return true
-}
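A short usage sketch of the relative-epsilon comparison above (illustrative only; the internal import path is shown for context and is not importable outside client_golang):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus/internal"
)

func main() {
	sum := 0.0
	for i := 0; i < 10; i++ {
		sum += 0.1 // accumulates binary rounding error
	}
	fmt.Println(sum == 1.0)                                  // false: 0.1 is not exact in binary
	fmt.Println(internal.AlmostEqualFloat64(sum, 1.0, 1e-9)) // true: within relative epsilon
}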
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
deleted file mode 100644
index a595a2036..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file provides tools to compare sequences of strings and generate textual diffs.
-//
-// Maintaining `GetUnifiedDiffString` here because original repository
-// (https://github.com/pmezard/go-difflib) is no longer maintained.
-package internal
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-)
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func calculateRatio(matches, length int) float64 {
- if length > 0 {
- return 2.0 * float64(matches) / float64(length)
- }
- return 1.0
-}
-
-type Match struct {
- A int
- B int
- Size int
-}
-
-type OpCode struct {
- Tag byte
- I1 int
- I2 int
- J1 int
- J2 int
-}
-
-// SequenceMatcher compares sequences of strings. The basic
-// algorithm predates, and is a little fancier than, an algorithm
-// published in the late 1980's by Ratcliff and Obershelp under the
-// hyperbolic name "gestalt pattern matching". The basic idea is to find
-// the longest contiguous matching subsequence that contains no "junk"
-// elements (R-O doesn't address junk). The same idea is then applied
-// recursively to the pieces of the sequences to the left and to the right
-// of the matching subsequence. This does not yield minimal edit
-// sequences, but does tend to yield matches that "look right" to people.
-//
-// SequenceMatcher tries to compute a "human-friendly diff" between two
-// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
-// longest *contiguous* & junk-free matching subsequence. That's what
-// catches people's eyes. The Windows(tm) windiff has another interesting
-// notion, pairing up elements that appear uniquely in each sequence.
-// That, and the method here, appear to yield more intuitive difference
-// reports than does diff. This method appears to be the least vulnerable
-// to synching up on blocks of "junk lines", though (like blank lines in
-// ordinary text files, or maybe "" lines in HTML files). That may be
-// because this is the only method of the 3 that has a *concept* of
-// "junk" .
-//
-// Timing: Basic R-O is cubic time worst case and quadratic time expected
-// case. SequenceMatcher is quadratic time for the worst case and has
-// expected-case behavior dependent in a complicated way on how many
-// elements the sequences have in common; best case time is linear.
-type SequenceMatcher struct {
- a []string
- b []string
- b2j map[string][]int
- IsJunk func(string) bool
- autoJunk bool
- bJunk map[string]struct{}
- matchingBlocks []Match
- fullBCount map[string]int
- bPopular map[string]struct{}
- opCodes []OpCode
-}
-
-func NewMatcher(a, b []string) *SequenceMatcher {
- m := SequenceMatcher{autoJunk: true}
- m.SetSeqs(a, b)
- return &m
-}
-
-func NewMatcherWithJunk(a, b []string, autoJunk bool,
- isJunk func(string) bool,
-) *SequenceMatcher {
- m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
- m.SetSeqs(a, b)
- return &m
-}
-
-// Set two sequences to be compared.
-func (m *SequenceMatcher) SetSeqs(a, b []string) {
- m.SetSeq1(a)
- m.SetSeq2(b)
-}
-
-// Set the first sequence to be compared. The second sequence to be compared is
-// not changed.
-//
-// SequenceMatcher computes and caches detailed information about the second
-// sequence, so if you want to compare one sequence S against many sequences,
-// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
-// sequences.
-//
-// See also SetSeqs() and SetSeq2().
-func (m *SequenceMatcher) SetSeq1(a []string) {
- if &a == &m.a {
- return
- }
- m.a = a
- m.matchingBlocks = nil
- m.opCodes = nil
-}
-
-// Set the second sequence to be compared. The first sequence to be compared is
-// not changed.
-func (m *SequenceMatcher) SetSeq2(b []string) {
- if &b == &m.b {
- return
- }
- m.b = b
- m.matchingBlocks = nil
- m.opCodes = nil
- m.fullBCount = nil
- m.chainB()
-}
-
-func (m *SequenceMatcher) chainB() {
- // Populate line -> index mapping
- b2j := map[string][]int{}
- for i, s := range m.b {
- indices := b2j[s]
- indices = append(indices, i)
- b2j[s] = indices
- }
-
- // Purge junk elements
- m.bJunk = map[string]struct{}{}
- if m.IsJunk != nil {
- junk := m.bJunk
- for s := range b2j {
- if m.IsJunk(s) {
- junk[s] = struct{}{}
- }
- }
- for s := range junk {
- delete(b2j, s)
- }
- }
-
- // Purge remaining popular elements
- popular := map[string]struct{}{}
- n := len(m.b)
- if m.autoJunk && n >= 200 {
- ntest := n/100 + 1
- for s, indices := range b2j {
- if len(indices) > ntest {
- popular[s] = struct{}{}
- }
- }
- for s := range popular {
- delete(b2j, s)
- }
- }
- m.bPopular = popular
- m.b2j = b2j
-}
-
-func (m *SequenceMatcher) isBJunk(s string) bool {
- _, ok := m.bJunk[s]
- return ok
-}
-
-// Find longest matching block in a[alo:ahi] and b[blo:bhi].
-//
-// If IsJunk is not defined:
-//
-// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-//
-// alo <= i <= i+k <= ahi
-// blo <= j <= j+k <= bhi
-//
-// and for all (i',j',k') meeting those conditions,
-//
-// k >= k'
-// i <= i'
-// and if i == i', j <= j'
-//
-// In other words, of all maximal matching blocks, return one that
-// starts earliest in a, and of all those maximal matching blocks that
-// start earliest in a, return the one that starts earliest in b.
-//
-// If IsJunk is defined, first the longest matching block is
-// determined as above, but with the additional restriction that no
-// junk element appears in the block. Then that block is extended as
-// far as possible by matching (only) junk elements on both sides. So
-// the resulting block never matches on junk except as identical junk
-// happens to be adjacent to an "interesting" match.
-//
-// If no blocks match, return (alo, blo, 0).
-func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
- // CAUTION: stripping common prefix or suffix would be incorrect.
- // E.g.,
- // ab
- // acab
- // Longest matching block is "ab", but if common prefix is
- // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
- // strip, so ends up claiming that ab is changed to acab by
- // inserting "ca" in the middle. That's minimal but unintuitive:
- // "it's obvious" that someone inserted "ac" at the front.
- // Windiff ends up at the same place as diff, but by pairing up
- // the unique 'b's and then matching the first two 'a's.
- besti, bestj, bestsize := alo, blo, 0
-
- // find longest junk-free match
- // during an iteration of the loop, j2len[j] = length of longest
- // junk-free match ending with a[i-1] and b[j]
- j2len := map[int]int{}
- for i := alo; i != ahi; i++ {
- // look at all instances of a[i] in b; note that because
- // b2j has no junk keys, the loop is skipped if a[i] is junk
- newj2len := map[int]int{}
- for _, j := range m.b2j[m.a[i]] {
- // a[i] matches b[j]
- if j < blo {
- continue
- }
- if j >= bhi {
- break
- }
- k := j2len[j-1] + 1
- newj2len[j] = k
- if k > bestsize {
- besti, bestj, bestsize = i-k+1, j-k+1, k
- }
- }
- j2len = newj2len
- }
-
- // Extend the best by non-junk elements on each end. In particular,
- // "popular" non-junk elements aren't in b2j, which greatly speeds
- // the inner loop above, but also means "the best" match so far
- // doesn't contain any junk *or* popular non-junk elements.
- for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- !m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize++
- }
-
- // Now that we have a wholly interesting match (albeit possibly
- // empty!), we may as well suck up the matching junk on each
- // side of it too. Can't think of a good reason not to, and it
- // saves post-processing the (possibly considerable) expense of
- // figuring out what to do with it. In the case of an empty
- // interesting match, this is clearly the right thing to do,
- // because no other kind of match is possible in the regions.
- for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize++
- }
-
- return Match{A: besti, B: bestj, Size: bestsize}
-}
-
-// Return list of triples describing matching subsequences.
-//
-// Each triple is of the form (i, j, n), and means that
-// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
-// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
-// adjacent triples in the list, and the second is not the last triple in the
-// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
-// adjacent equal blocks.
-//
-// The last triple is a dummy, (len(a), len(b), 0), and is the only
-// triple with n==0.
-func (m *SequenceMatcher) GetMatchingBlocks() []Match {
- if m.matchingBlocks != nil {
- return m.matchingBlocks
- }
-
- var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
- matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
- match := m.findLongestMatch(alo, ahi, blo, bhi)
- i, j, k := match.A, match.B, match.Size
- if match.Size > 0 {
- if alo < i && blo < j {
- matched = matchBlocks(alo, i, blo, j, matched)
- }
- matched = append(matched, match)
- if i+k < ahi && j+k < bhi {
- matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
- }
- }
- return matched
- }
- matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
-
- // It's possible that we have adjacent equal blocks in the
- // matching_blocks list now.
- nonAdjacent := []Match{}
- i1, j1, k1 := 0, 0, 0
- for _, b := range matched {
- // Is this block adjacent to i1, j1, k1?
- i2, j2, k2 := b.A, b.B, b.Size
- if i1+k1 == i2 && j1+k1 == j2 {
- // Yes, so collapse them -- this just increases the length of
- // the first block by the length of the second, and the first
- // block so lengthened remains the block to compare against.
- k1 += k2
- } else {
- // Not adjacent. Remember the first block (k1==0 means it's
- // the dummy we started with), and make the second block the
- // new block to compare against.
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
- }
- i1, j1, k1 = i2, j2, k2
- }
- }
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
- }
-
- nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
- m.matchingBlocks = nonAdjacent
- return m.matchingBlocks
-}
-
-// Return list of 5-tuples describing how to turn a into b.
-//
-// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
-// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
-// tuple preceding it, and likewise for j1 == the previous j2.
-//
-// The tags are characters, with these meanings:
-//
-// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
-//
-// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
-//
-// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
-//
-// 'e' (equal): a[i1:i2] == b[j1:j2]
-func (m *SequenceMatcher) GetOpCodes() []OpCode {
- if m.opCodes != nil {
- return m.opCodes
- }
- i, j := 0, 0
- matching := m.GetMatchingBlocks()
- opCodes := make([]OpCode, 0, len(matching))
- for _, m := range matching {
- // invariant: we've pumped out correct diffs to change
- // a[:i] into b[:j], and the next matching block is
- // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
- // out a diff to change a[i:ai] into b[j:bj], pump out
- // the matching block, and move (i,j) beyond the match
- ai, bj, size := m.A, m.B, m.Size
- tag := byte(0)
- if i < ai && j < bj {
- tag = 'r'
- } else if i < ai {
- tag = 'd'
- } else if j < bj {
- tag = 'i'
- }
- if tag > 0 {
- opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
- }
- i, j = ai+size, bj+size
- // the list of matching blocks is terminated by a
- // sentinel with size 0
- if size > 0 {
- opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
- }
- }
- m.opCodes = opCodes
- return m.opCodes
-}
-
-// Isolate change clusters by eliminating ranges with no changes.
-//
-// Return a generator of groups with up to n lines of context.
-// Each group is in the same format as returned by GetOpCodes().
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
- if n < 0 {
- n = 3
- }
- codes := m.GetOpCodes()
- if len(codes) == 0 {
- codes = []OpCode{{'e', 0, 1, 0, 1}}
- }
- // Fixup leading and trailing groups if they show no changes.
- if codes[0].Tag == 'e' {
- c := codes[0]
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
- }
- if codes[len(codes)-1].Tag == 'e' {
- c := codes[len(codes)-1]
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
- }
- nn := n + n
- groups := [][]OpCode{}
- group := []OpCode{}
- for _, c := range codes {
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- // End the current group and start a new one whenever
- // there is a large range with no changes.
- if c.Tag == 'e' && i2-i1 > nn {
- group = append(group, OpCode{
- c.Tag, i1, min(i2, i1+n),
- j1, min(j2, j1+n),
- })
- groups = append(groups, group)
- group = []OpCode{}
- i1, j1 = max(i1, i2-n), max(j1, j2-n)
- }
- group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
- }
- if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
- groups = append(groups, group)
- }
- return groups
-}
-
-// Return a measure of the sequences' similarity (float in [0,1]).
-//
-// Where T is the total number of elements in both sequences, and
-// M is the number of matches, this is 2.0*M / T.
-// Note that this is 1 if the sequences are identical, and 0 if
-// they have nothing in common.
-//
-// .Ratio() is expensive to compute if you haven't already computed
-// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
-// want to try .QuickRatio() or .RealQuickRatio() first to get an
-// upper bound.
-func (m *SequenceMatcher) Ratio() float64 {
- matches := 0
- for _, m := range m.GetMatchingBlocks() {
- matches += m.Size
- }
- return calculateRatio(matches, len(m.a)+len(m.b))
-}
-
-// Return an upper bound on ratio() relatively quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute.
-func (m *SequenceMatcher) QuickRatio() float64 {
- // viewing a and b as multisets, set matches to the cardinality
- // of their intersection; this counts the number of matches
- // without regard to order, so is clearly an upper bound
- if m.fullBCount == nil {
- m.fullBCount = map[string]int{}
- for _, s := range m.b {
- m.fullBCount[s]++
- }
- }
-
- // avail[x] is the number of times x appears in 'b' less the
- // number of times we've seen it in 'a' so far ... kinda
- avail := map[string]int{}
- matches := 0
- for _, s := range m.a {
- n, ok := avail[s]
- if !ok {
- n = m.fullBCount[s]
- }
- avail[s] = n - 1
- if n > 0 {
- matches++
- }
- }
- return calculateRatio(matches, len(m.a)+len(m.b))
-}
-
-// Return an upper bound on ratio() very quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute than either .Ratio() or .QuickRatio().
-func (m *SequenceMatcher) RealQuickRatio() float64 {
- la, lb := len(m.a), len(m.b)
- return calculateRatio(min(la, lb), la+lb)
-}
-
-// Convert range to the "ed" format
-func formatRangeUnified(start, stop int) string {
- // Per the diff spec at http://www.unix.org/single_unix_specification/
- beginning := start + 1 // lines start numbering with one
- length := stop - start
- if length == 1 {
- return fmt.Sprintf("%d", beginning)
- }
- if length == 0 {
- beginning-- // empty ranges begin at line just before the range
- }
- return fmt.Sprintf("%d,%d", beginning, length)
-}
-
-// Unified diff parameters
-type UnifiedDiff struct {
- A []string // First sequence lines
- FromFile string // First file name
- FromDate string // First file time
- B []string // Second sequence lines
- ToFile string // Second file name
- ToDate string // Second file time
- Eol string // Headers end of line, defaults to LF
- Context int // Number of context lines
-}
-
-// Compare two sequences of lines; generate the delta as a unified diff.
-//
-// Unified diffs are a compact way of showing line changes and a few
-// lines of context. The number of context lines is set by 'n' which
-// defaults to three.
-//
-// By default, the diff control lines (those with ---, +++, or @@) are
-// created with a trailing newline. This is helpful so that inputs
-// created from file.readlines() result in diffs that are suitable for
-// file.writelines() since both the inputs and outputs have trailing
-// newlines.
-//
-// For inputs that do not have trailing newlines, set the lineterm
-// argument to "" so that the output will be uniformly newline free.
-//
-// The unidiff format normally has a header for filenames and modification
-// times. Any or all of these may be specified using strings for
-// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
-// The modification times are normally expressed in the ISO 8601 format.
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
- buf := bufio.NewWriter(writer)
- defer buf.Flush()
- wf := func(format string, args ...interface{}) error {
- _, err := buf.WriteString(fmt.Sprintf(format, args...))
- return err
- }
- ws := func(s string) error {
- _, err := buf.WriteString(s)
- return err
- }
-
- if len(diff.Eol) == 0 {
- diff.Eol = "\n"
- }
-
- started := false
- m := NewMatcher(diff.A, diff.B)
- for _, g := range m.GetGroupedOpCodes(diff.Context) {
- if !started {
- started = true
- fromDate := ""
- if len(diff.FromDate) > 0 {
- fromDate = "\t" + diff.FromDate
- }
- toDate := ""
- if len(diff.ToDate) > 0 {
- toDate = "\t" + diff.ToDate
- }
- if diff.FromFile != "" || diff.ToFile != "" {
- err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
- if err != nil {
- return err
- }
- err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
- if err != nil {
- return err
- }
- }
- }
- first, last := g[0], g[len(g)-1]
- range1 := formatRangeUnified(first.I1, last.I2)
- range2 := formatRangeUnified(first.J1, last.J2)
- if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
- return err
- }
- for _, c := range g {
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- if c.Tag == 'e' {
- for _, line := range diff.A[i1:i2] {
- if err := ws(" " + line); err != nil {
- return err
- }
- }
- continue
- }
- if c.Tag == 'r' || c.Tag == 'd' {
- for _, line := range diff.A[i1:i2] {
- if err := ws("-" + line); err != nil {
- return err
- }
- }
- }
- if c.Tag == 'r' || c.Tag == 'i' {
- for _, line := range diff.B[j1:j2] {
- if err := ws("+" + line); err != nil {
- return err
- }
- }
- }
- }
- }
- return nil
-}
-
-// Like WriteUnifiedDiff but returns the diff as a string.
-func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
- w := &bytes.Buffer{}
- err := WriteUnifiedDiff(w, diff)
- return w.String(), err
-}
-
-// Split a string on "\n" while preserving the newlines. The output can be used
-// as input for UnifiedDiff and ContextDiff structures.
-func SplitLines(s string) []string {
- lines := strings.SplitAfter(s, "\n")
- lines[len(lines)-1] += "\n"
- return lines
-}
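A sketch of how the types above fit together to produce a unified diff (illustrative only; the internal import is shown for context):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus/internal"
)

func main() {
	diff := internal.UnifiedDiff{
		A:        internal.SplitLines("one\ntwo\nthree\n"),
		B:        internal.SplitLines("one\n2\nthree\n"),
		FromFile: "want",
		ToFile:   "got",
		Context:  1,
	}
	out, err := internal.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
	// Output begins:
	// --- want
	// +++ got
	// @@ -1,3 +1,3 @@
	//  one
	// -two
	// +2
	//  three
}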
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
deleted file mode 100644
index a4fa6eabd..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import "regexp"
-
-type GoCollectorRule struct {
- Matcher *regexp.Regexp
- Deny bool
-}
-
-// GoCollectorOptions should not be used directly by anything except the
-// `collectors` package. Use it via the collectors package instead. See issue
-// https://github.com/prometheus/client_golang/issues/1030.
-//
-// This is internal, so external users can only use it via the `collectors.WithGoCollector*` methods.
-type GoCollectorOptions struct {
- DisableMemStatsLikeMetrics bool
- RuntimeMetricSumForHist map[string]string
- RuntimeMetricRules []GoCollectorRule
-}
-
-var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
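For orientation, a hypothetical literal showing the shape of these options; external code would never build this directly (it is filled in via the collectors.WithGoCollector* functions), and the rule below is made up for illustration:

package internal

import "regexp"

// exampleOpts disables the MemStats-like metrics and admits a single
// additional runtime/metrics histogram via an allow rule.
var exampleOpts = GoCollectorOptions{
	DisableMemStatsLikeMetrics: true,
	RuntimeMetricRules: []GoCollectorRule{
		{Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`), Deny: false},
	},
}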
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
deleted file mode 100644
index 97d17d6cb..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.17
-// +build go1.17
-
-package internal
-
-import (
- "math"
- "path"
- "runtime/metrics"
- "strings"
-
- "github.com/prometheus/common/model"
-)
-
-// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
-// metric description and validates whether the metric is suitable for integration
-// with Prometheus.
-//
-// Returns false if a name could not be produced, or if Prometheus does not understand
-// the runtime/metrics Kind.
-//
-// Note that the main reason a name couldn't be produced is if the runtime/metrics
-// package exports a name with characters outside the valid Prometheus metric name
-// character set. This is theoretically possible, but should never happen in practice.
-// Still, don't rely on it.
-func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
- namespace := "go"
-
- comp := strings.SplitN(d.Name, ":", 2)
- key := comp[0]
- unit := comp[1]
-
- // The last path element in the key is the name,
- // the rest is the subsystem.
- subsystem := path.Dir(key[1:] /* remove leading / */)
- name := path.Base(key)
-
- // subsystem is translated by replacing all / and - with _.
- subsystem = strings.ReplaceAll(subsystem, "/", "_")
- subsystem = strings.ReplaceAll(subsystem, "-", "_")
-
- // unit is translated assuming that the unit contains no
- // non-ASCII characters.
- unit = strings.ReplaceAll(unit, "-", "_")
- unit = strings.ReplaceAll(unit, "*", "_")
- unit = strings.ReplaceAll(unit, "/", "_per_")
-
- // name has - replaced with _ and is concatenated with the unit and
- // other data.
- name = strings.ReplaceAll(name, "-", "_")
- name += "_" + unit
- if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
- name += "_total"
- }
-
- valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
- switch d.Kind {
- case metrics.KindUint64:
- case metrics.KindFloat64:
- case metrics.KindFloat64Histogram:
- default:
- valid = false
- }
- return namespace, subsystem, name, valid
-}
-
-// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
-// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
-// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
-// as the bottom-most upper-bound inclusive bucket in Prometheus.
-func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
- switch unit {
- case "bytes":
- // Re-bucket as powers of 2.
- return reBucketExp(buckets, 2)
- case "seconds":
- // Re-bucket as powers of 10 and then merge all buckets greater
- // than 1 second into the +Inf bucket.
- b := reBucketExp(buckets, 10)
- for i := range b {
- if b[i] <= 1 {
- continue
- }
- b[i] = math.Inf(1)
- b = b[:i+1]
- break
- }
- return b
- }
- return buckets
-}
-
-// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
-// downsamples the buckets to those a multiple of base apart. The end result
-// is a roughly exponential (in many cases, perfectly exponential) bucketing
-// scheme.
-func reBucketExp(buckets []float64, base float64) []float64 {
- bucket := buckets[0]
- var newBuckets []float64
- // We may see a -Inf here, in which case, add it and skip it
- // since we risk producing NaNs otherwise.
- //
- // We need to preserve -Inf values to maintain runtime/metrics
- // conventions. We'll strip it out later.
- if bucket == math.Inf(-1) {
- newBuckets = append(newBuckets, bucket)
- buckets = buckets[1:]
- bucket = buckets[0]
- }
- // From now on, bucket should always have a non-Inf value because
- // Infs are only ever at the ends of the bucket lists, so
- // arithmetic operations on it are non-NaN.
- for i := 1; i < len(buckets); i++ {
- if bucket >= 0 && buckets[i] < bucket*base {
- // The next bucket we want to include is at least bucket*base.
- continue
- } else if bucket < 0 && buckets[i] < bucket/base {
- // In this case the bucket we're targeting is negative, and since
- // we're ascending through buckets here, we need to divide to get
- // closer to zero exponentially.
- continue
- }
-		// The +Inf bucket will always be the last one, and we'll always
-		// end up including it here: +Inf never satisfies either skip
-		// condition above, so the final append after the loop emits it.
- newBuckets = append(newBuckets, bucket)
- bucket = buckets[i]
- }
- return append(newBuckets, bucket)
-}
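A sketch of the name translation above applied to a real runtime/metrics entry (the internal import is illustrative only):

package main

import (
	"fmt"
	"runtime/metrics"

	"github.com/prometheus/client_golang/prometheus/internal"
)

func main() {
	d := metrics.Description{
		Name:       "/gc/heap/allocs:bytes",
		Kind:       metrics.KindUint64,
		Cumulative: true,
	}
	ns, sub, name, ok := internal.RuntimeMetricsToProm(&d)
	// "/" becomes "_", the unit is appended, and cumulative metrics get "_total".
	fmt.Println(ns, sub, name, ok) // go gc_heap allocs_bytes_total true
}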
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
deleted file mode 100644
index 6515c1148..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "sort"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type LabelPairSorter []*dto.LabelPair
-
-func (s LabelPairSorter) Len() int {
- return len(s)
-}
-
-func (s LabelPairSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s LabelPairSorter) Less(i, j int) bool {
- return s[i].GetName() < s[j].GetName()
-}
-
-// MetricSorter is a sortable slice of *dto.Metric.
-type MetricSorter []*dto.Metric
-
-func (s MetricSorter) Len() int {
- return len(s)
-}
-
-func (s MetricSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s MetricSorter) Less(i, j int) bool {
- if len(s[i].Label) != len(s[j].Label) {
- // This should not happen. The metrics are
-		// inconsistent. However, we have to deal with it, as
- // people might use custom collectors or metric family injection
- // to create inconsistent metrics. So let's simply compare the
- // number of labels in this case. That will still yield
- // reproducible sorting.
- return len(s[i].Label) < len(s[j].Label)
- }
- for n, lp := range s[i].Label {
- vi := lp.GetValue()
- vj := s[j].Label[n].GetValue()
- if vi != vj {
- return vi < vj
- }
- }
-
- // We should never arrive here. Multiple metrics with the same
- // label set in the same scrape will lead to undefined ingestion
- // behavior. However, as above, we have to provide stable sorting
- // here, even for inconsistent metrics. So sort equal metrics
- // by their timestamp, with missing timestamps (implying "now")
- // coming last.
- if s[i].TimestampMs == nil {
- return false
- }
- if s[j].TimestampMs == nil {
- return true
- }
- return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
-
-// NormalizeMetricFamilies returns a MetricFamily slice with empty
-// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
-// the slice, with the contained Metrics sorted within each MetricFamily.
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
- for _, mf := range metricFamiliesByName {
- sort.Sort(MetricSorter(mf.Metric))
- }
- names := make([]string, 0, len(metricFamiliesByName))
- for name, mf := range metricFamiliesByName {
- if len(mf.Metric) > 0 {
- names = append(names, name)
- }
- }
- sort.Strings(names)
- result := make([]*dto.MetricFamily, 0, len(names))
- for _, name := range names {
- result = append(result, metricFamiliesByName[name])
- }
- return result
-}
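A usage sketch of the label-pair sorter above, as collectors do before exposition (illustrative; the internal import is shown for context):

package main

import (
	"fmt"
	"sort"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/client_golang/prometheus/internal"
)

func main() {
	labels := []*dto.LabelPair{
		{Name: proto.String("method"), Value: proto.String("GET")},
		{Name: proto.String("code"), Value: proto.String("200")},
	}
	sort.Sort(internal.LabelPairSorter(labels))
	for _, lp := range labels {
		fmt.Println(lp.GetName(), lp.GetValue()) // code 200, then method GET
	}
}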
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
deleted file mode 100644
index c21911f29..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "strings"
- "unicode/utf8"
-
- "github.com/prometheus/common/model"
-)
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-//
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
-// LabelConstraint normalizes label values.
-type LabelConstraint func(string) string
-
-// ConstrainedLabel represents a label name and its constraint function
-// to normalize label values. This type is commonly used when constructing
-// metric vector Collectors.
-type ConstrainedLabel struct {
- Name string
- Constraint LabelConstraint
-}
-
-// ConstrainableLabels is an interface that allows creating labels that can
-// be optionally constrained.
-//
-// prometheus.V2().NewCounterVec(CounterVecOpts{
-// CounterOpts: {...}, // Usual CounterOpts fields
-//	  VariableLabels: ConstrainedLabels{
-// {Name: "A"},
-// {Name: "B", Constraint: func(v string) string { ... }},
-// },
-// })
-type ConstrainableLabels interface {
- compile() *compiledLabels
- labelNames() []string
-}
-
-// ConstrainedLabels represents a collection of label name -> constraint function
-// mappings to normalize label values. This type is commonly used when constructing
-// metric vector Collectors.
-type ConstrainedLabels []ConstrainedLabel
-
-func (cls ConstrainedLabels) compile() *compiledLabels {
- compiled := &compiledLabels{
- names: make([]string, len(cls)),
- labelConstraints: map[string]LabelConstraint{},
- }
-
- for i, label := range cls {
- compiled.names[i] = label.Name
- if label.Constraint != nil {
- compiled.labelConstraints[label.Name] = label.Constraint
- }
- }
-
- return compiled
-}
-
-func (cls ConstrainedLabels) labelNames() []string {
- names := make([]string, len(cls))
- for i, label := range cls {
- names[i] = label.Name
- }
- return names
-}
-
-// UnconstrainedLabels represents a collection of labels without any constraints
-// on their values. Thus, it is simply a collection of label names.
-//
-// UnconstrainedLabels([]string{ "A", "B" })
-//
-// is equivalent to
-//
-// ConstrainedLabels {
-// { Name: "A" },
-// { Name: "B" },
-// }
-type UnconstrainedLabels []string
-
-func (uls UnconstrainedLabels) compile() *compiledLabels {
- return &compiledLabels{
- names: uls,
- }
-}
-
-func (uls UnconstrainedLabels) labelNames() []string {
- return uls
-}
-
-type compiledLabels struct {
- names []string
- labelConstraints map[string]LabelConstraint
-}
-
-func (cls *compiledLabels) compile() *compiledLabels {
- return cls
-}
-
-func (cls *compiledLabels) labelNames() []string {
- return cls.names
-}
-
-func (cls *compiledLabels) constrain(labelName, value string) string {
- if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
- return fn(value)
- }
- return value
-}
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
-func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
- return fmt.Errorf(
- "%w: %q has %d variable labels named %q but %d values %q were provided",
- errInconsistentCardinality, fqName,
- len(labels), labels,
- len(labelValues), labelValues,
- )
-}
-
-func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
- if len(labels) != expectedNumberOfValues {
- return fmt.Errorf(
- "%w: expected %d label values but got %d in %#v",
- errInconsistentCardinality, expectedNumberOfValues,
- len(labels), labels,
- )
- }
-
- for name, val := range labels {
- if !utf8.ValidString(val) {
- return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
- }
- }
-
- return nil
-}
-
-func validateLabelValues(vals []string, expectedNumberOfValues int) error {
- if len(vals) != expectedNumberOfValues {
- // The call below makes vals escape, copy them to avoid that.
- vals := append([]string(nil), vals...)
- return fmt.Errorf(
- "%w: expected %d label values but got %d in %#v",
- errInconsistentCardinality, expectedNumberOfValues,
- len(vals), vals,
- )
- }
-
- for _, val := range vals {
- if !utf8.ValidString(val) {
- return fmt.Errorf("label value %q is not valid UTF-8", val)
- }
- }
-
- return nil
-}
-
-func checkLabelName(l string) bool {
- return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
-}
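A sketch of constructing constrained labels with a lower-casing constraint; per the ConstrainableLabels doc comment above, such a value would be passed as VariableLabels when building a metric vector:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	labels := prometheus.ConstrainedLabels{
		{Name: "code"},
		{Name: "method", Constraint: strings.ToLower},
	}
	// The constraint normalizes values before they reach the metric vector.
	fmt.Println(labels[1].Constraint("GET")) // get
}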
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
deleted file mode 100644
index 9d9b81ab4..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "math"
- "sort"
- "strings"
- "time"
-
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/model"
- "google.golang.org/protobuf/proto"
-)
-
-var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
-
-// A Metric models a single sample value with its meta data being exported to
-// Prometheus. Implementations of Metric in this package are Gauge, Counter,
-// Histogram, Summary, and Untyped.
-type Metric interface {
- // Desc returns the descriptor for the Metric. This method idempotently
- // returns the same descriptor throughout the lifetime of the
- // Metric. The returned descriptor is immutable by contract. A Metric
- // unable to describe itself must return an invalid descriptor (created
- // with NewInvalidDesc).
- Desc() *Desc
- // Write encodes the Metric into a "Metric" Protocol Buffer data
- // transmission object.
- //
- // Metric implementations must observe concurrency safety as reads of
- // this metric may occur at any time, and any blocking occurs at the
- // expense of total performance of rendering all registered
- // metrics. Ideally, Metric implementations should support concurrent
- // readers.
- //
- // While populating dto.Metric, it is the responsibility of the
- // implementation to ensure validity of the Metric protobuf (like valid
- // UTF-8 strings or syntactically valid metric and label names). It is
- // recommended to sort labels lexicographically. Callers of Write should
- // still make sure of sorting if they depend on it.
- Write(*dto.Metric) error
- // TODO(beorn7): The original rationale of passing in a pre-allocated
- // dto.Metric protobuf to save allocations has disappeared. The
- // signature of this method should be changed to "Write() (*dto.Metric,
- // error)".
-}
-
-// Opts bundles the options for creating most Metric types. Each metric
-// implementation XXX has its own XXXOpts type, but in most cases, it is just
-// an alias of this type (which might change when the requirement arises).
-//
-// It is mandatory to set Name to a non-empty string. All other fields are
-// optional and can safely be left at their zero value, although it is strongly
-// encouraged to set a Help string.
-type Opts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Metric (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the metric must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this metric.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
- ConstLabels Labels
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-}
-
-// BuildFQName joins the given three name components by "_". Empty name
-// components are ignored. If the name parameter itself is empty, an empty
-// string is returned, no matter what. Metric implementations included in this
-// library use this function internally to generate the fully-qualified metric
-// name from the name component in their Opts. Users of the library will only
-// need this function if they implement their own Metric or instantiate a Desc
-// (with NewDesc) directly.
-func BuildFQName(namespace, subsystem, name string) string {
- if name == "" {
- return ""
- }
- switch {
- case namespace != "" && subsystem != "":
- return strings.Join([]string{namespace, subsystem, name}, "_")
- case namespace != "":
- return strings.Join([]string{namespace, name}, "_")
- case subsystem != "":
- return strings.Join([]string{subsystem, name}, "_")
- }
- return name
-}
-
-type invalidMetric struct {
- desc *Desc
- err error
-}
-
-// NewInvalidMetric returns a metric whose Write method always returns the
-// provided error. It is useful if a Collector finds itself unable to collect
-// a metric and wishes to report an error to the registry.
-func NewInvalidMetric(desc *Desc, err error) Metric {
- return &invalidMetric{desc, err}
-}
-
-func (m *invalidMetric) Desc() *Desc { return m.desc }
-
-func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
-
-type timestampedMetric struct {
- Metric
- t time.Time
-}
-
-func (m timestampedMetric) Write(pb *dto.Metric) error {
- e := m.Metric.Write(pb)
- pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
- return e
-}
-
-// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
-// way that it has an explicit timestamp set to the provided Time. This is only
-// useful in rare cases as the timestamp of a Prometheus metric should usually
-// be set by the Prometheus server during scraping. Exceptions include mirroring
-// metrics with given timestamps from other metric
-// sources.
-//
-// NewMetricWithTimestamp works best with MustNewConstMetric,
-// MustNewConstHistogram, and MustNewConstSummary, see example.
-//
-// Currently, the exposition formats used by Prometheus are limited to
-// millisecond resolution. Thus, the provided time will be rounded down to the
-// next full millisecond value.
-func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
- return timestampedMetric{Metric: m, t: t}
-}
-
-type withExemplarsMetric struct {
- Metric
-
- exemplars []*dto.Exemplar
-}
-
-func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
- if err := m.Metric.Write(pb); err != nil {
- return err
- }
-
- switch {
- case pb.Counter != nil:
- pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
- case pb.Histogram != nil:
- for _, e := range m.exemplars {
- // pb.Histogram.Bucket are sorted by UpperBound.
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
- return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
- })
- if i < len(pb.Histogram.Bucket) {
- pb.Histogram.Bucket[i].Exemplar = e
- } else {
- // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
- b := &dto.Bucket{
- CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
- UpperBound: proto.Float64(math.Inf(1)),
- Exemplar: e,
- }
- pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
- }
- }
- default:
- // TODO(bwplotka): Implement Gauge?
- return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
- }
-
- return nil
-}
-
-// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
-type Exemplar struct {
- Value float64
- Labels Labels
- // Optional.
-	// The default value (time.Time{}) indicates it's empty, which should be
-	// understood as the time.Now() at the moment the metric was created.
- Timestamp time.Time
-}
-
-// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
-// exemplars. Exemplars are validated.
-//
-// Only the last applicable exemplar from the list is injected.
-// For example, for a Counter it means the last exemplar is injected.
-// For a Histogram, it means the last applicable exemplar for each bucket is injected.
-//
-// NewMetricWithExemplars works best with MustNewConstMetric and
-// MustNewConstHistogram, see example.
-func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
- if len(exemplars) == 0 {
- return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
- }
-
- var (
- now = time.Now()
- exs = make([]*dto.Exemplar, len(exemplars))
- err error
- )
- for i, e := range exemplars {
- ts := e.Timestamp
- if ts.IsZero() {
- ts = now
- }
- exs[i], err = newExemplar(e.Value, ts, e.Labels)
- if err != nil {
- return nil, err
- }
- }
-
- return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
-}
-
-// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
-// NewMetricWithExemplars would have returned an error.
-func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
- ret, err := NewMetricWithExemplars(m, exemplars...)
- if err != nil {
- panic(err)
- }
- return ret
-}
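A usage sketch of the exemplar helpers above on a const counter metric; the descriptor name and trace ID below are made up for the example:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("demo_requests_total", "Requests served (example).", nil, nil)
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42)
	m = prometheus.MustNewMetricWithExemplars(m, prometheus.Exemplar{
		Value:  42,
		Labels: prometheus.Labels{"trace_id": "abc123"}, // hypothetical trace ID
	})
	_ = m // typically sent on a Collect channel: ch <- m
}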
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
deleted file mode 100644
index 7c12b2108..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !js || wasm
-// +build !js wasm
-
-package prometheus
-
-import "runtime"
-
-// getRuntimeNumThreads returns the number of open OS threads.
-func getRuntimeNumThreads() float64 {
- n, _ := runtime.ThreadCreateProfile(nil)
- return float64(n)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
deleted file mode 100644
index 7348df01d..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build js && !wasm
-// +build js,!wasm
-
-package prometheus
-
-// getRuntimeNumThreads returns the number of open OS threads.
-func getRuntimeNumThreads() float64 {
- return 1
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
deleted file mode 100644
index 03773b21f..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Observer is the interface that wraps the Observe method, which is used by
-// Histogram and Summary to add observations.
-type Observer interface {
- Observe(float64)
-}
-
-// The ObserverFunc type is an adapter to allow the use of ordinary
-// functions as Observers. If f is a function with the appropriate
-// signature, ObserverFunc(f) is an Observer that calls f.
-//
-// This adapter is usually used in connection with the Timer type, and there are
-// two general use cases:
-//
-// The most common one is to use a Gauge as the Observer for a Timer.
-// See the "Gauge" Timer example.
-//
-// The more advanced use case is to create a function that dynamically decides
-// which Observer to use for observing the duration. See the "Complex" Timer
-// example.
-type ObserverFunc func(float64)
-
-// Observe calls f(value). It implements Observer.
-func (f ObserverFunc) Observe(value float64) {
- f(value)
-}
-
-// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
-type ObserverVec interface {
- GetMetricWith(Labels) (Observer, error)
- GetMetricWithLabelValues(lvs ...string) (Observer, error)
- With(Labels) Observer
- WithLabelValues(...string) Observer
- CurryWith(Labels) (ObserverVec, error)
- MustCurryWith(Labels) ObserverVec
-
- Collector
-}
-
-// ExemplarObserver is implemented by Observers that offer the option of
-// observing a value together with an exemplar. Its ObserveWithExemplar method
-// works like the Observe method of an Observer but also replaces the currently
-// saved exemplar (if any) with a new one, created from the provided value, the
-// current time as timestamp, and the provided Labels. Empty Labels will lead to
-// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
-// left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 128 runes in total.
-type ExemplarObserver interface {
- ObserveWithExemplar(value float64, exemplar Labels)
-}
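A sketch of the "Gauge as Observer" pattern mentioned in the ObserverFunc doc comment above, using it to adapt Gauge.Set for a Timer; the metric name is made up:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	g := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "demo_last_run_duration_seconds", // hypothetical metric name
		Help: "Duration of the last run.",
	})
	timer := prometheus.NewTimer(prometheus.ObserverFunc(g.Set))
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	timer.ObserveDuration()           // sets the gauge to the elapsed seconds
}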
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
deleted file mode 100644
index 62a4e7ad9..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
- inBytes, outBytes *Desc
-}
-
-// ProcessCollectorOpts defines the behavior of a process metrics collector
-// created with NewProcessCollector.
-type ProcessCollectorOpts struct {
- // PidFn returns the PID of the process the collector collects metrics
- // for. It is called upon each collection. By default, the PID of the
- // current process is used, as determined on construction time by
- // calling os.Getpid().
- PidFn func() (int, error)
- // If non-empty, each of the collected metrics is prefixed by the
- // provided string and an underscore ("_").
- Namespace string
- // If true, any error encountered during collection is reported as an
- // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
- // and the collected metrics will be incomplete. (Possibly, no metrics
- // will be collected at all.) While that's usually not desired, it is
- // appropriate for the common "mix-in" of process metrics, where process
- // metrics are nice to have, but failing to collect them should not
- // disrupt the collection of the remaining metrics.
- ReportErrors bool
-}
-
-// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewProcessCollector instead.
-func NewProcessCollector(opts ProcessCollectorOpts) Collector {
- ns := ""
- if len(opts.Namespace) > 0 {
- ns = opts.Namespace + "_"
- }
-
- c := &processCollector{
- reportErrors: opts.ReportErrors,
- cpuTotal: NewDesc(
- ns+"process_cpu_seconds_total",
- "Total user and system CPU time spent in seconds.",
- nil, nil,
- ),
- openFDs: NewDesc(
- ns+"process_open_fds",
- "Number of open file descriptors.",
- nil, nil,
- ),
- maxFDs: NewDesc(
- ns+"process_max_fds",
- "Maximum number of open file descriptors.",
- nil, nil,
- ),
- vsize: NewDesc(
- ns+"process_virtual_memory_bytes",
- "Virtual memory size in bytes.",
- nil, nil,
- ),
- maxVsize: NewDesc(
- ns+"process_virtual_memory_max_bytes",
- "Maximum amount of virtual memory available in bytes.",
- nil, nil,
- ),
- rss: NewDesc(
- ns+"process_resident_memory_bytes",
- "Resident memory size in bytes.",
- nil, nil,
- ),
- startTime: NewDesc(
- ns+"process_start_time_seconds",
- "Start time of the process since unix epoch in seconds.",
- nil, nil,
- ),
- inBytes: NewDesc(
- ns+"process_network_receive_bytes_total",
- "Number of bytes received by the process over the network.",
- nil, nil,
- ),
- outBytes: NewDesc(
- ns+"process_network_transmit_bytes_total",
- "Number of bytes sent by the process over the network.",
- nil, nil,
- ),
- }
-
- if opts.PidFn == nil {
- c.pidFn = getPIDFn()
- } else {
- c.pidFn = opts.PidFn
- }
-
- // Set up process metric collection if supported by the runtime.
- if canCollectProcess() {
- c.collectFn = c.processCollect
- } else {
- c.collectFn = func(ch chan<- Metric) {
- c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
- }
- }
-
- return c
-}
-
-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal
- ch <- c.openFDs
- ch <- c.maxFDs
- ch <- c.vsize
- ch <- c.maxVsize
- ch <- c.rss
- ch <- c.startTime
- ch <- c.inBytes
- ch <- c.outBytes
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *processCollector) Collect(ch chan<- Metric) {
- c.collectFn(ch)
-}
-
-func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
- if !c.reportErrors {
- return
- }
- if desc == nil {
- desc = NewInvalidDesc(err)
- }
- ch <- NewInvalidMetric(desc, err)
-}
-
-// NewPidFileFn returns a function that retrieves a pid from the specified file.
-// It is meant to be used for the PidFn field in ProcessCollectorOpts.
-func NewPidFileFn(pidFilePath string) func() (int, error) {
- return func() (int, error) {
- content, err := os.ReadFile(pidFilePath)
- if err != nil {
- return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
- }
- pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
- if err != nil {
- return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
- }
-
- return pid, nil
- }
-}
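
How the options above fit together, as a minimal sketch; the pid-file path and port are illustrative assumptions:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()

        // Deprecated entry point, shown because it is what this file
        // provided; collectors.NewProcessCollector takes the same options.
        reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
            // Read the PID from a file instead of using os.Getpid().
            PidFn:        prometheus.NewPidFileFn("/var/run/myapp.pid"),
            ReportErrors: true,
        }))

        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        _ = http.ListenAndServe(":8080", nil)
    }
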
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
deleted file mode 100644
index b1e363d6c..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build js
-// +build js
-
-package prometheus
-
-func canCollectProcess() bool {
- return false
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- // noop on this platform
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
deleted file mode 100644
index 14d56d2d0..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows && !js && !wasip1
-// +build !windows,!js,!wasip1
-
-package prometheus
-
-import (
- "github.com/prometheus/procfs"
-)
-
-func canCollectProcess() bool {
- _, err := procfs.NewDefaultFS()
- return err == nil
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- pid, err := c.pidFn()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- p, err := procfs.NewProc(pid)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- if stat, err := p.Stat(); err == nil {
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
- if startTime, err := stat.StartTime(); err == nil {
- ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
- } else {
- c.reportError(ch, c.startTime, err)
- }
- } else {
- c.reportError(ch, nil, err)
- }
-
- if fds, err := p.FileDescriptorsLen(); err == nil {
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
- } else {
- c.reportError(ch, c.openFDs, err)
- }
-
- if limits, err := p.Limits(); err == nil {
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
- ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
- } else {
- c.reportError(ch, nil, err)
- }
-
- if netstat, err := p.Netstat(); err == nil {
- var inOctets, outOctets float64
- if netstat.IpExt.InOctets != nil {
- inOctets = *netstat.IpExt.InOctets
- }
- if netstat.IpExt.OutOctets != nil {
- outOctets = *netstat.IpExt.OutOctets
- }
- ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
- ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
- } else {
- c.reportError(ch, nil, err)
- }
-}
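
The collector above reads everything from procfs; a standalone sketch of the same calls, useful for seeing what the metrics map to (assumes a Linux-style /proc mount):

    package main

    import (
        "fmt"
        "os"

        "github.com/prometheus/procfs"
    )

    func main() {
        // Same probe the collector uses: succeeds only where /proc is mounted.
        if _, err := procfs.NewDefaultFS(); err != nil {
            fmt.Fprintln(os.Stderr, "process metrics not supported:", err)
            return
        }

        p, err := procfs.NewProc(os.Getpid())
        if err != nil {
            panic(err)
        }
        stat, err := p.Stat()
        if err != nil {
            panic(err)
        }
        // CPUTime feeds process_cpu_seconds_total; ResidentMemory feeds
        // process_resident_memory_bytes.
        fmt.Printf("cpu=%.2fs rss=%d bytes\n", stat.CPUTime(), stat.ResidentMemory())
    }
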
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
deleted file mode 100644
index d8d9a6d7a..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build wasip1
-// +build wasip1
-
-package prometheus
-
-func canCollectProcess() bool {
- return false
-}
-
-func (*processCollector) processCollect(chan<- Metric) {
- // noop on this platform
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
deleted file mode 100644
index f973398df..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-func canCollectProcess() bool {
- return true
-}
-
-var (
- modpsapi = syscall.NewLazyDLL("psapi.dll")
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
-
- procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
- procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
-)
-
-type processMemoryCounters struct {
- // System interface description
- // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
-
- // Refer to the Golang internal implementation
- // https://golang.org/src/internal/syscall/windows/psapi_windows.go
- _ uint32
- PageFaultCount uint32
- PeakWorkingSetSize uintptr
- WorkingSetSize uintptr
- QuotaPeakPagedPoolUsage uintptr
- QuotaPagedPoolUsage uintptr
- QuotaPeakNonPagedPoolUsage uintptr
- QuotaNonPagedPoolUsage uintptr
- PagefileUsage uintptr
- PeakPagefileUsage uintptr
- PrivateUsage uintptr
-}
-
-func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
- mem := processMemoryCounters{}
- r1, _, err := procGetProcessMemoryInfo.Call(
- uintptr(handle),
- uintptr(unsafe.Pointer(&mem)),
- uintptr(unsafe.Sizeof(mem)),
- )
- if r1 != 1 {
- return mem, err
- }
- return mem, nil
-}
-
-func getProcessHandleCount(handle windows.Handle) (uint32, error) {
- var count uint32
- r1, _, err := procGetProcessHandleCount.Call(
- uintptr(handle),
- uintptr(unsafe.Pointer(&count)),
- )
- if r1 != 1 {
- return 0, err
- }
- return count, nil
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- h, err := windows.GetCurrentProcess()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- var startTime, exitTime, kernelTime, userTime windows.Filetime
- err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
-
- mem, err := getProcessMemoryInfo(h)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
-
- handles, err := getProcessHandleCount(h)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
-}
-
-func fileTimeToSeconds(ft windows.Filetime) float64 {
- return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
-}
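
The FILETIME conversion above packs two 32-bit halves into a count of 100-nanosecond ticks; the same arithmetic as a portable sketch (the struct stands in for windows.Filetime so it compiles anywhere):

    package main

    import "fmt"

    // filetime mirrors the HighDateTime/LowDateTime halves of a Windows
    // FILETIME, which counts 100-nanosecond intervals.
    type filetime struct {
        HighDateTime uint32
        LowDateTime  uint32
    }

    func fileTimeToSeconds(ft filetime) float64 {
        // 1e7 ticks of 100ns each make one second.
        return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
    }

    func main() {
        // 25,000,000 ticks x 100ns = 2.5 seconds.
        fmt.Println(fileTimeToSeconds(filetime{LowDateTime: 25_000_000})) // 2.5
    }
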
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
deleted file mode 100644
index 315eab5f1..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-const (
- closeNotifier = 1 << iota
- flusher
- hijacker
- readerFrom
- pusher
-)
-
-type delegator interface {
- http.ResponseWriter
-
- Status() int
- Written() int64
-}
-
-type responseWriterDelegator struct {
- http.ResponseWriter
-
- status int
- written int64
- wroteHeader bool
- observeWriteHeader func(int)
-}
-
-func (r *responseWriterDelegator) Status() int {
- return r.status
-}
-
-func (r *responseWriterDelegator) Written() int64 {
- return r.written
-}
-
-func (r *responseWriterDelegator) WriteHeader(code int) {
- if r.observeWriteHeader != nil && !r.wroteHeader {
- // Only call observeWriteHeader the first time. It's a bug if
- // WriteHeader is called more than once, but we want to protect
- // against it here. Note that we still delegate the WriteHeader
- // call to the original ResponseWriter so as not to mask the bug from it.
- r.observeWriteHeader(code)
- }
- r.status = code
- r.wroteHeader = true
- r.ResponseWriter.WriteHeader(code)
-}
-
-func (r *responseWriterDelegator) Write(b []byte) (int, error) {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- n, err := r.ResponseWriter.Write(b)
- r.written += int64(n)
- return n, err
-}
-
-// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
-// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
-func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
- return r.ResponseWriter
-}
-
-type (
- closeNotifierDelegator struct{ *responseWriterDelegator }
- flusherDelegator struct{ *responseWriterDelegator }
- hijackerDelegator struct{ *responseWriterDelegator }
- readerFromDelegator struct{ *responseWriterDelegator }
- pusherDelegator struct{ *responseWriterDelegator }
-)
-
-func (d closeNotifierDelegator) CloseNotify() <-chan bool {
- //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
- return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-
-func (d flusherDelegator) Flush() {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !d.wroteHeader {
- d.WriteHeader(http.StatusOK)
- }
- d.ResponseWriter.(http.Flusher).Flush()
-}
-
-func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- return d.ResponseWriter.(http.Hijacker).Hijack()
-}
-
-func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !d.wroteHeader {
- d.WriteHeader(http.StatusOK)
- }
- n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
- d.written += n
- return n, err
-}
-
-func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
- return d.ResponseWriter.(http.Pusher).Push(target, opts)
-}
-
-var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
-
-func init() {
- // TODO(beorn7): Code generation would help here.
- pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
- return d
- }
- pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
- return closeNotifierDelegator{d}
- }
- pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
- return flusherDelegator{d}
- }
- pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
- return struct {
- *responseWriterDelegator
- http.Flusher
- http.CloseNotifier
- }{d, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
- return hijackerDelegator{d}
- }
- pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.CloseNotifier
- }{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.Flusher
- }{d, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
- return readerFromDelegator{d}
- }
- pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.CloseNotifier
- }{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Flusher
- }{d, readerFromDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Flusher
- http.CloseNotifier
- }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- }{d, readerFromDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.CloseNotifier
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
- return pusherDelegator{d}
- }
- pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Flusher
- }{d, pusherDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- }{d, pusherDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.CloseNotifier
- }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.Flusher
- }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- }{d, pusherDelegator{d}, readerFromDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Flusher
- }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
-}
-
-func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
- d := &responseWriterDelegator{
- ResponseWriter: w,
- observeWriteHeader: observeWriteHeaderFunc,
- }
-
- id := 0
- //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
- if _, ok := w.(http.CloseNotifier); ok {
- id += closeNotifier
- }
- if _, ok := w.(http.Flusher); ok {
- id += flusher
- }
- if _, ok := w.(http.Hijacker); ok {
- id += hijacker
- }
- if _, ok := w.(io.ReaderFrom); ok {
- id += readerFrom
- }
- if _, ok := w.(http.Pusher); ok {
- id += pusher
- }
-
- return pickDelegator[id](d)
-}
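
The pickDelegator table above is indexed by a bitmask of the optional interfaces the wrapped ResponseWriter implements; a standalone sketch of that detection step (constant names are illustrative, and the deprecated CloseNotifier bit is omitted):

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"
    )

    const (
        flusher = 1 << iota
        hijacker
        readerFrom
        pusher
    )

    // featureMask mirrors newDelegator's detection: each optional interface
    // the ResponseWriter implements sets one bit, and the combined value
    // selects a wrapper exposing exactly those interfaces.
    func featureMask(w http.ResponseWriter) int {
        id := 0
        if _, ok := w.(http.Flusher); ok {
            id += flusher
        }
        if _, ok := w.(http.Hijacker); ok {
            id += hijacker
        }
        if _, ok := w.(io.ReaderFrom); ok {
            id += readerFrom
        }
        if _, ok := w.(http.Pusher); ok {
            id += pusher
        }
        return id
    }

    func main() {
        // httptest.ResponseRecorder implements http.Flusher only.
        fmt.Printf("mask=%04b\n", featureMask(httptest.NewRecorder()))
    }
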
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
deleted file mode 100644
index e598e66e6..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package promhttp provides tooling around HTTP servers and clients.
-//
-// First, the package allows the creation of http.Handler instances to expose
-// Prometheus metrics via HTTP. promhttp.Handler acts on the
-// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
-// custom registry or anything that implements the Gatherer interface. It also
-// allows the creation of handlers that act differently on errors or that
-// log errors.
-//
-// Second, the package provides tooling to instrument instances of http.Handler
-// via middleware. Middleware wrappers follow the naming scheme
-// InstrumentHandlerX, where X describes the intended use of the middleware.
-// See each function's doc comment for specific details.
-//
-// Finally, the package allows for an http.RoundTripper to be instrumented via
-// middleware. Middleware wrappers follow the naming scheme
-// InstrumentRoundTripperX, where X describes the intended use of the
-// middleware. See each function's doc comment for specific details.
-package promhttp
-
-import (
- "compress/gzip"
- "errors"
- "fmt"
- "io"
- "net/http"
- "strconv"
- "sync"
- "time"
-
- "github.com/klauspost/compress/zstd"
- "github.com/prometheus/common/expfmt"
-
- "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
- processStartTimeHeader = "Process-Start-Time-Unix"
-)
-
-// Compression represents a content encoding that handlers can offer for HTTP
-// responses.
-type Compression string
-
-const (
- Identity Compression = "identity"
- Gzip Compression = "gzip"
- Zstd Compression = "zstd"
-)
-
-var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
-
-var gzipPool = sync.Pool{
- New: func() interface{} {
- return gzip.NewWriter(nil)
- },
-}
-
-// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
-// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
-// no error logging, and it applies compression if requested by the client.
-//
-// The returned http.Handler is already instrumented using the
-// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
-// create multiple http.Handlers by separate calls of the Handler function, the
-// metrics used for instrumentation will be shared between them, providing
-// global scrape counts.
-//
-// This function is meant to cover the bulk of basic use cases. If you are doing
-// anything that requires more customization (including using a non-default
-// Gatherer, different instrumentation, and non-default HandlerOpts), use the
-// HandlerFor function. See there for details.
-func Handler() http.Handler {
- return InstrumentMetricHandler(
- prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
- )
-}
-
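
In the common case, wiring up the handler described above is a one-liner; the port is an illustrative assumption:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Default-gatherer handler, already instrumented as described above.
        http.Handle("/metrics", promhttp.Handler())
        _ = http.ListenAndServe(":8080", nil)
    }
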
-// HandlerFor returns an uninstrumented http.Handler for the provided
-// Gatherer. The behavior of the Handler is defined by the provided
-// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
-// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
-// instrumentation. Use the InstrumentMetricHandler function to apply the same
-// kind of instrumentation as it is used by the Handler function.
-func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
- return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
-}
-
-// HandlerForTransactional is like HandlerFor, but it uses a transactional gatherer,
-// which may safely modify the returned *dto.MetricFamily values in place before
-// the call to `Gather` and after the call to the `done` function of that `Gather`.
-func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
- var (
- inFlightSem chan struct{}
- errCnt = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "promhttp_metric_handler_errors_total",
- Help: "Total number of internal errors encountered by the promhttp metric handler.",
- },
- []string{"cause"},
- )
- )
-
- if opts.MaxRequestsInFlight > 0 {
- inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
- }
- if opts.Registry != nil {
- // Initialize all possibilities that can occur below.
- errCnt.WithLabelValues("gathering")
- errCnt.WithLabelValues("encoding")
- if err := opts.Registry.Register(errCnt); err != nil {
- are := &prometheus.AlreadyRegisteredError{}
- if errors.As(err, are) {
- errCnt = are.ExistingCollector.(*prometheus.CounterVec)
- } else {
- panic(err)
- }
- }
- }
-
- // Select compression formats to offer based on default or user choice.
- var compressions []string
- if !opts.DisableCompression {
- offers := defaultCompressionFormats
- if len(opts.OfferedCompressions) > 0 {
- offers = opts.OfferedCompressions
- }
- for _, comp := range offers {
- compressions = append(compressions, string(comp))
- }
- }
-
- h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
- if !opts.ProcessStartTime.IsZero() {
- rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
- }
- if inFlightSem != nil {
- select {
- case inFlightSem <- struct{}{}: // All good, carry on.
- defer func() { <-inFlightSem }()
- default:
- http.Error(rsp, fmt.Sprintf(
- "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
- ), http.StatusServiceUnavailable)
- return
- }
- }
- mfs, done, err := reg.Gather()
- defer done()
- if err != nil {
- if opts.ErrorLog != nil {
- opts.ErrorLog.Println("error gathering metrics:", err)
- }
- errCnt.WithLabelValues("gathering").Inc()
- switch opts.ErrorHandling {
- case PanicOnError:
- panic(err)
- case ContinueOnError:
- if len(mfs) == 0 {
- // Still report the error if no metrics have been gathered.
- httpError(rsp, err)
- return
- }
- case HTTPErrorOnError:
- httpError(rsp, err)
- return
- }
- }
-
- var contentType expfmt.Format
- if opts.EnableOpenMetrics {
- contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
- } else {
- contentType = expfmt.Negotiate(req.Header)
- }
- rsp.Header().Set(contentTypeHeader, string(contentType))
-
- w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
- if err != nil {
- if opts.ErrorLog != nil {
- opts.ErrorLog.Println("error getting writer", err)
- }
- w = io.Writer(rsp)
- encodingHeader = string(Identity)
- }
-
- defer closeWriter()
-
- // Set Content-Encoding only when data is compressed
- if encodingHeader != string(Identity) {
- rsp.Header().Set(contentEncodingHeader, encodingHeader)
- }
- enc := expfmt.NewEncoder(w, contentType)
-
- // handleError handles the error according to opts.ErrorHandling
- // and returns true if we have to abort after the handling.
- handleError := func(err error) bool {
- if err == nil {
- return false
- }
- if opts.ErrorLog != nil {
- opts.ErrorLog.Println("error encoding and sending metric family:", err)
- }
- errCnt.WithLabelValues("encoding").Inc()
- switch opts.ErrorHandling {
- case PanicOnError:
- panic(err)
- case HTTPErrorOnError:
- // We cannot really send an HTTP error at this
- // point because we most likely have written
- // something to rsp already. But at least we can
- // stop sending.
- return true
- }
- // Do nothing in all other cases, including ContinueOnError.
- return false
- }
-
- for _, mf := range mfs {
- if handleError(enc.Encode(mf)) {
- return
- }
- }
- if closer, ok := enc.(expfmt.Closer); ok {
- // This in particular takes care of the final "# EOF\n" line for OpenMetrics.
- if handleError(closer.Close()) {
- return
- }
- }
- })
-
- if opts.Timeout <= 0 {
- return h
- }
- return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
- "Exceeded configured timeout of %v.\n",
- opts.Timeout,
- ))
-}
-
-// InstrumentMetricHandler is usually used with an http.Handler returned by the
-// HandlerFor function. It instruments the provided http.Handler with two
-// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
-// scrapes partitioned by HTTP status code, and a gauge
-// "promhttp_metric_handler_requests_in_flight" to track the number of
-// simultaneous scrapes. This function idempotently registers collectors for
-// both metrics with the provided Registerer. It panics if the registration
-// fails. The provided metrics are useful to see how many scrapes hit the
-// monitored target (which could be from different Prometheus servers or other
-// scrapers), and how often they overlap (which would result in more than one
-// scrape in flight at the same time). Note that the scrapes-in-flight gauge
-// will contain the scrape by which it is exposed, while the scrape counter will
-// only get incremented after the scrape is complete (as only then the status
-// code is known). For tracking scrape durations, use the
-// "scrape_duration_seconds" gauge created by the Prometheus server upon each
-// scrape.
-func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
- cnt := prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "promhttp_metric_handler_requests_total",
- Help: "Total number of scrapes by HTTP status code.",
- },
- []string{"code"},
- )
- // Initialize the most likely HTTP status codes.
- cnt.WithLabelValues("200")
- cnt.WithLabelValues("500")
- cnt.WithLabelValues("503")
- if err := reg.Register(cnt); err != nil {
- are := &prometheus.AlreadyRegisteredError{}
- if errors.As(err, are) {
- cnt = are.ExistingCollector.(*prometheus.CounterVec)
- } else {
- panic(err)
- }
- }
-
- gge := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "promhttp_metric_handler_requests_in_flight",
- Help: "Current number of scrapes being served.",
- })
- if err := reg.Register(gge); err != nil {
- are := &prometheus.AlreadyRegisteredError{}
- if errors.As(err, are) {
- gge = are.ExistingCollector.(prometheus.Gauge)
- } else {
- panic(err)
- }
- }
-
- return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
-}
-
-// HandlerErrorHandling defines how a Handler serving metrics will handle
-// errors.
-type HandlerErrorHandling int
-
-// These constants cause handlers serving metrics to behave as described if
-// errors are encountered.
-const (
- // Serve an HTTP status code 500 upon the first error
- // encountered. Report the error message in the body. Note that HTTP
- // errors cannot be served anymore once the beginning of a regular
- // payload has been sent. Thus, in the (unlikely) case that encoding the
- // payload into the negotiated wire format fails, serving the response
- // will simply be aborted. Set an ErrorLog in HandlerOpts to detect
- // those errors.
- HTTPErrorOnError HandlerErrorHandling = iota
- // Ignore errors and try to serve as many metrics as possible. However,
- // if no metrics can be served, serve an HTTP status code 500 and the
- // last error message in the body. Only use this in deliberate "best
- // effort" metrics collection scenarios. In this case, it is highly
- // recommended to provide other means of detecting errors: By setting an
- // ErrorLog in HandlerOpts, the errors are logged. By providing a
- // Registry in HandlerOpts, the exposed metrics include an error counter
- // "promhttp_metric_handler_errors_total", which can be used for
- // alerts.
- ContinueOnError
- // Panic upon the first error encountered (useful for "crash only" apps).
- PanicOnError
-)
-
-// Logger is the minimal interface HandlerOpts needs for logging. Note that
-// log.Logger from the standard library implements this interface, and it is
-// easy for custom loggers to implement, if they don't do so already.
-type Logger interface {
- Println(v ...interface{})
-}
-
-// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
-// zero value of HandlerOpts is a reasonable default.
-type HandlerOpts struct {
- // ErrorLog specifies an optional Logger for errors collecting and
- // serving metrics. If nil, errors are not logged at all. Note that the
- // type of a reported error is often prometheus.MultiError, which
- // formats into a multi-line error string. If you want to avoid the
- // latter, create a Logger implementation that detects a
- // prometheus.MultiError and formats the contained errors into one line.
- ErrorLog Logger
- // ErrorHandling defines how errors are handled. Note that errors are
- // logged regardless of the configured ErrorHandling provided ErrorLog
- // is not nil.
- ErrorHandling HandlerErrorHandling
- // If Registry is not nil, it is used to register a metric
- // "promhttp_metric_handler_errors_total", partitioned by "cause". A
- // failed registration causes a panic. Note that this error counter is
- // different from the instrumentation you get from the various
- // InstrumentHandler... helpers. It counts errors that don't necessarily
- // result in a non-2xx HTTP status code. There are two typical cases:
- // (1) Encoding errors that only happen after streaming of the HTTP body
- // has already started (and the status code 200 has been sent). This
- // should only happen with custom collectors. (2) Collection errors with
- // no effect on the HTTP status code because ErrorHandling is set to
- // ContinueOnError.
- Registry prometheus.Registerer
- // DisableCompression disables the response encoding (compression) and
- // encoding negotiation. If true, the handler will
- // never compress the response, even if requested
- // by the client and the OfferedCompressions field is set.
- DisableCompression bool
- // OfferedCompressions is a set of encodings (compressions) the handler
- // will try to offer when negotiating with the client. This defaults to
- // identity, gzip and zstd.
- // NOTE: If the handler can't agree with the client on an encoding, or if
- // unsupported or empty encodings are set in OfferedCompressions, the
- // handler always falls back to no compression (identity), for
- // compatibility reasons. In such cases ErrorLog will be used if set.
- OfferedCompressions []Compression
- // The number of concurrent HTTP requests is limited to
- // MaxRequestsInFlight. Additional requests are responded to with 503
- // Service Unavailable and a suitable message in the body. If
- // MaxRequestsInFlight is 0 or negative, no limit is applied.
- MaxRequestsInFlight int
- // If handling a request takes longer than Timeout, it is responded to
- // with 503 Service Unavailable and a suitable message. No timeout is
- // applied if Timeout is 0 or negative. Note that with the current
- // implementation, reaching the timeout simply ends the HTTP request as
- // described above (and even that only if sending of the body hasn't
- // started yet), while the bulk work of gathering all the metrics keeps
- // running in the background (with the eventual result to be thrown
- // away). Until the implementation is improved, it is recommended to
- // implement a separate timeout in potentially slow Collectors.
- Timeout time.Duration
- // If true, the experimental OpenMetrics encoding is added to the
- // possible options during content negotiation. Note that Prometheus
- // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
- // the only way to transmit exemplars. However, the move to OpenMetrics
- // is not completely transparent. Most notably, the values of "quantile"
- // labels of Summaries and "le" labels of Histograms are formatted with
- // a trailing ".0" if they would otherwise look like integer numbers
- // (which changes the identity of the resulting series on the Prometheus
- // server).
- EnableOpenMetrics bool
- // ProcessStartTime allows setting a process start time value that will be
- // exposed with the "Process-Start-Time-Unix" response header along with
- // the metrics payload. This allows callers to perform efficient
- // transformations to cumulative counters (e.g. OpenTelemetry) or, more
- // generally, _created timestamp estimation per scrape target.
- // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
- // exposition format.
- ProcessStartTime time.Time
-}
-
-// httpError removes any content-encoding header and then calls http.Error with
-// the provided error and http.StatusInternalServerError. The error contents are
-// supposed to be uncompressed plain text. Same as with a plain http.Error, this
-// must not be called if the header or any payload has already been sent.
-func httpError(rsp http.ResponseWriter, err error) {
- rsp.Header().Del(contentEncodingHeader)
- http.Error(
- rsp,
- "An error has occurred while serving metrics:\n\n"+err.Error(),
- http.StatusInternalServerError,
- )
-}
-
-// negotiateEncodingWriter reads the Accept-Encoding header from a request and
-// selects the right compression based on an allow-list of supported
-// compressions. It returns a writer implementing the compression and the
-// correct value that the caller can set in the response header.
-func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
- if len(compressions) == 0 {
- return rw, string(Identity), func() {}, nil
- }
-
- // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
- selected := httputil.NegotiateContentEncoding(r, compressions)
-
- switch selected {
- case "zstd":
- // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
- z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
- if err != nil {
- return nil, "", func() {}, err
- }
-
- return z, selected, func() { _ = z.Close() }, nil
- case "gzip":
- gz := gzipPool.Get().(*gzip.Writer)
- gz.Reset(rw)
- return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
- case "identity":
- // This means the content is not compressed.
- return rw, selected, func() {}, nil
- default:
- // The requested content encoding is not implemented yet.
- return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
- }
-}
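
Putting the exported surface of this file together, a minimal sketch of HandlerFor with non-default HandlerOpts; the metric registry contents, port, and timeout are illustrative assumptions:

    package main

    import (
        "log"
        "net/http"
        "os"
        "time"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()

        handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
            ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
            ErrorHandling:       promhttp.ContinueOnError,
            Registry:            reg, // exposes promhttp_metric_handler_errors_total
            MaxRequestsInFlight: 3,
            Timeout:             10 * time.Second,
            OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
        })

        http.Handle("/metrics", handler)
        _ = http.ListenAndServe(":8080", nil)
    }
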
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
deleted file mode 100644
index d3482c40c..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "crypto/tls"
- "net/http"
- "net/http/httptrace"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// The RoundTripperFunc type is an adapter to allow the use of ordinary
-// functions as RoundTrippers. If f is a function with the appropriate
-// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
-type RoundTripperFunc func(req *http.Request) (*http.Response, error)
-
-// RoundTrip implements the RoundTripper interface.
-func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
- return rt(r)
-}
-
-// InstrumentRoundTripperInFlight is a middleware that wraps the provided
-// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
-// requests currently handled by the wrapped http.RoundTripper.
-//
-// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
- return func(r *http.Request) (*http.Response, error) {
- gauge.Inc()
- defer gauge.Dec()
- return next.RoundTrip(r)
- }
-}
-
-// InstrumentRoundTripperCounter is a middleware that wraps the provided
-// http.RoundTripper to observe the request result with the provided CounterVec.
-// The CounterVec must have zero, one, or two non-const non-curried labels. For
-// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. For the "method" label a predefined default label value set
-// is used to filter given values. Values besides predefined values will count
-// as `unknown` method.`WithExtraMethods` can be used to add more
-// methods to the set. Partitioning of the CounterVec happens by HTTP status code
-// and/or HTTP method if the respective instance label names are present in the
-// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
-//
-// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
-// is not incremented.
-//
-// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
-//
-// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := defaultOptions()
- for _, o := range opts {
- o.apply(rtOpts)
- }
-
- // Curry the counter with dynamic labels before checking the remaining labels.
- code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
-
- return func(r *http.Request) (*http.Response, error) {
- resp, err := next.RoundTrip(r)
- if err == nil {
- l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
- for label, resolve := range rtOpts.extraLabelsFromCtx {
- l[label] = resolve(resp.Request.Context())
- }
- addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
- }
- return resp, err
- }
-}
-
-// InstrumentRoundTripperDuration is a middleware that wraps the provided
-// http.RoundTripper to observe the request duration with the provided
-// ObserverVec. The ObserverVec must have zero, one, or two non-const
-// non-curried labels. For those, the only allowed label names are "code" and
-// "method". The function panics otherwise. For the "method" label a predefined
-// default label value set is used to filter given values. Values besides
-// predefined values will count as `unknown` method. `WithExtraMethods`
-// can be used to add more methods to the set. The Observe method of the Observer
-// in the ObserverVec is called with the request duration in
-// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped RoundTripper panics or returns a non-nil error, no values are
-// reported.
-//
-// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go1.9+.
-func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := defaultOptions()
- for _, o := range opts {
- o.apply(rtOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
-
- return func(r *http.Request) (*http.Response, error) {
- start := time.Now()
- resp, err := next.RoundTrip(r)
- if err == nil {
- l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
- for label, resolve := range rtOpts.extraLabelsFromCtx {
- l[label] = resolve(resp.Request.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
- }
- return resp, err
- }
-}
-
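
The three client middlewares above are designed to nest; a minimal sketch of the usual chain, where the metric names are illustrative assumptions:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "client_in_flight_requests",
            Help: "In-flight requests for the wrapped client.",
        })
        counter := prometheus.NewCounterVec(
            prometheus.CounterOpts{
                Name: "client_api_requests_total",
                Help: "API requests by status code and method.",
            },
            []string{"code", "method"},
        )
        duration := prometheus.NewHistogramVec(
            prometheus.HistogramOpts{
                Name:    "client_request_duration_seconds",
                Help:    "Request latencies.",
                Buckets: prometheus.DefBuckets,
            },
            []string{"method"}, // partition by method only
        )
        prometheus.MustRegister(inFlight, counter, duration)

        client := &http.Client{
            // Each middleware wraps the next RoundTripper in the chain.
            Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
                promhttp.InstrumentRoundTripperCounter(counter,
                    promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
                ),
            ),
        }
        _, _ = client.Get("https://example.com")
    }
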
-// InstrumentTrace is used to offer flexibility in instrumenting the available
-// httptrace.ClientTrace hook functions. Each function is passed a float64
-// representing the time in seconds since the start of the HTTP request. A user
-// may choose to use separate Histograms with different buckets, or to implement
-// custom instance labels on a per-function basis.
-type InstrumentTrace struct {
- GotConn func(float64)
- PutIdleConn func(float64)
- GotFirstResponseByte func(float64)
- Got100Continue func(float64)
- DNSStart func(float64)
- DNSDone func(float64)
- ConnectStart func(float64)
- ConnectDone func(float64)
- TLSHandshakeStart func(float64)
- TLSHandshakeDone func(float64)
- WroteHeaders func(float64)
- Wait100Continue func(float64)
- WroteRequest func(float64)
-}
-
-// InstrumentRoundTripperTrace is a middleware that wraps the provided
-// RoundTripper and reports times to hook functions provided in the
-// InstrumentTrace struct. Hook functions that are not present in the provided
-// InstrumentTrace struct are ignored. Times reported to the hook functions are
-// time since the start of the request. Only with Go1.9+, those times are
-// guaranteed to never be negative. (Earlier Go versions do not use a
-// monotonic clock.) Note that partitioning of Histograms is expensive and
-// should be used judiciously.
-//
-// For hook functions that receive an error as an argument, no observations are
-// made in the event of a non-nil error value.
-//
-// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
- return func(r *http.Request) (*http.Response, error) {
- start := time.Now()
-
- trace := &httptrace.ClientTrace{
- GotConn: func(_ httptrace.GotConnInfo) {
- if it.GotConn != nil {
- it.GotConn(time.Since(start).Seconds())
- }
- },
- PutIdleConn: func(err error) {
- if err != nil {
- return
- }
- if it.PutIdleConn != nil {
- it.PutIdleConn(time.Since(start).Seconds())
- }
- },
- DNSStart: func(_ httptrace.DNSStartInfo) {
- if it.DNSStart != nil {
- it.DNSStart(time.Since(start).Seconds())
- }
- },
- DNSDone: func(_ httptrace.DNSDoneInfo) {
- if it.DNSDone != nil {
- it.DNSDone(time.Since(start).Seconds())
- }
- },
- ConnectStart: func(_, _ string) {
- if it.ConnectStart != nil {
- it.ConnectStart(time.Since(start).Seconds())
- }
- },
- ConnectDone: func(_, _ string, err error) {
- if err != nil {
- return
- }
- if it.ConnectDone != nil {
- it.ConnectDone(time.Since(start).Seconds())
- }
- },
- GotFirstResponseByte: func() {
- if it.GotFirstResponseByte != nil {
- it.GotFirstResponseByte(time.Since(start).Seconds())
- }
- },
- Got100Continue: func() {
- if it.Got100Continue != nil {
- it.Got100Continue(time.Since(start).Seconds())
- }
- },
- TLSHandshakeStart: func() {
- if it.TLSHandshakeStart != nil {
- it.TLSHandshakeStart(time.Since(start).Seconds())
- }
- },
- TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
- if err != nil {
- return
- }
- if it.TLSHandshakeDone != nil {
- it.TLSHandshakeDone(time.Since(start).Seconds())
- }
- },
- WroteHeaders: func() {
- if it.WroteHeaders != nil {
- it.WroteHeaders(time.Since(start).Seconds())
- }
- },
- Wait100Continue: func() {
- if it.Wait100Continue != nil {
- it.Wait100Continue(time.Since(start).Seconds())
- }
- },
- WroteRequest: func(_ httptrace.WroteRequestInfo) {
- if it.WroteRequest != nil {
- it.WroteRequest(time.Since(start).Seconds())
- }
- },
- }
- r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
-
- return next.RoundTrip(r)
- }
-}
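
A minimal sketch of wiring a single InstrumentTrace hook; the metric name and buckets are illustrative assumptions, and unset hooks are simply skipped:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name:    "dns_duration_seconds",
            Help:    "Time until DNS resolution completed.",
            Buckets: []float64{0.001, 0.005, 0.025, 0.1},
        })
        prometheus.MustRegister(dnsLatency)

        trace := &promhttp.InstrumentTrace{
            // Hook receives seconds since the request started.
            DNSDone: func(t float64) { dnsLatency.Observe(t) },
        }

        client := &http.Client{
            Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
        }
        _, _ = client.Get("https://example.com")
    }
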
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
deleted file mode 100644
index 356edb786..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "errors"
- "net/http"
- "strconv"
- "strings"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// magicString is used for the hacky label test in checkLabels. Remove once fixed.
-const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
-
-// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
-// which falls back to [prometheus.Observer.Observe] if no labels are provided.
-func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
- if labels == nil {
- obs.Observe(val)
- return
- }
- obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
-}
-
-// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
-// which falls back to [prometheus.Counter.Add] if no labels are provided.
-func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
- if labels == nil {
- obs.Add(val)
- return
- }
- obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
-}
-
-// InstrumentHandlerInFlight is a middleware that wraps the provided
-// http.Handler. It sets the provided prometheus.Gauge to the number of
-// requests currently handled by the wrapped http.Handler.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- g.Inc()
- defer g.Dec()
- next.ServeHTTP(w, r)
- })
-}
-
-// InstrumentHandlerDuration is a middleware that wraps the provided
-// http.Handler to observe the request duration with the provided ObserverVec.
-// The ObserverVec must have valid metric and label names and must have zero,
-// one, or two non-const non-curried labels. For those, the only allowed label
-// names are "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. The Observe
-// method of the Observer in the ObserverVec is called with the request duration
-// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
-// the respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go1.9+.
-func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- if code {
- return func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
- }
- }
-
- return func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- next.ServeHTTP(w, r)
- l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
- }
-}
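-
-// A hedged usage sketch (assumed metric and handler names): a HistogramVec
-// partitioned by "code" and "method", wrapped around a handler.
-//
-//	dur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
-//		Name:    "http_request_duration_seconds",
-//		Help:    "Duration of HTTP requests.",
-//		Buckets: prometheus.DefBuckets,
-//	}, []string{"code", "method"})
-//	prometheus.MustRegister(dur)
-//	http.Handle("/api", InstrumentHandlerDuration(dur, apiHandler))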
-
-// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
-// to observe the request result with the provided CounterVec. The CounterVec
-// must have valid metric and label names and must have zero, one, or two
-// non-const non-curried labels. For those, the only allowed label names are
-// "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
-// CounterVec happens by HTTP status code and/or HTTP method if the respective
-// instance label names are present in the CounterVec. For unpartitioned
-// counting, use a CounterVec with zero labels.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, the Counter is not incremented.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the counter with dynamic labels before checking the remaining labels.
- code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- if code {
- return func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
- }
- }
-
- return func(w http.ResponseWriter, r *http.Request) {
- next.ServeHTTP(w, r)
-
- l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
- }
-}
-
-// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
-// http.Handler to observe with the provided ObserverVec the request duration
-// until the response headers are written. The ObserverVec must have valid
-// metric and label names and must have zero, one, or two non-const non-curried
-// labels. For those, the only allowed label names are "code" and "method". The
-// function panics otherwise. For the "method" label, a predefined set of
-// known methods is used to filter the observed values; any other value is
-// counted as the `unknown` method. `WithExtraMethods` can be used to extend
-// that set. The Observe method of the Observer in the
-// ObserverVec is called with the request duration in seconds. Partitioning
-// happens by HTTP status code and/or HTTP method if the respective instance
-// label names are present in the ObserverVec. For unpartitioned observations,
-// use an ObserverVec with zero labels. Note that partitioning of Histograms is
-// expensive and should be used judiciously.
-//
-// If the wrapped Handler panics before calling WriteHeader, no value is
-// reported.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go 1.9 or later.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- return func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- d := newDelegator(w, func(status int) {
- l := labels(code, method, r.Method, status, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
- })
- next.ServeHTTP(d, r)
- }
-}
-
-// InstrumentHandlerRequestSize is a middleware that wraps the provided
-// http.Handler to observe the request size with the provided ObserverVec. The
-// ObserverVec must have valid metric and label names and must have zero, one,
-// or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. The Observe
-// method of the Observer in the ObserverVec is called with the request size in
-// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- if code {
- return func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
- size := computeApproximateRequestSize(r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
- }
- }
-
- return func(w http.ResponseWriter, r *http.Request) {
- next.ServeHTTP(w, r)
- size := computeApproximateRequestSize(r)
-
- l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
- }
-}
-
-// InstrumentHandlerResponseSize is a middleware that wraps the provided
-// http.Handler to observe the response size with the provided ObserverVec. The
-// ObserverVec must have valid metric and label names and must have zero, one,
-// or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. The Observe
-// method of the Observer in the ObserverVec is called with the response size in
-// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
- })
-}
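-
-// A hedged sketch of how the middlewares above compose (inFlight, dur, reqs,
-// respSize, and handler are assumed to exist); the innermost wrapper runs
-// closest to the handler:
-//
-//	chain := InstrumentHandlerInFlight(inFlight,
-//		InstrumentHandlerDuration(dur,
-//			InstrumentHandlerCounter(reqs,
-//				InstrumentHandlerResponseSize(respSize, handler),
-//			),
-//		),
-//	)
-//	http.Handle("/", chain)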
-
-// checkLabels returns whether the provided Collector has a non-const,
-// non-curried label named "code" and/or "method". It panics if the provided
-// Collector does not have a Desc or has more than one Desc or its Desc is
-// invalid. It also panics if the Collector has any non-const, non-curried
-// labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code, method bool) {
- // TODO(beorn7): Remove this hacky way to check for instance labels
- // once Descriptors can have their dimensionality queried.
- var (
- desc *prometheus.Desc
- m prometheus.Metric
- pm dto.Metric
- lvs []string
- )
-
- // Get the Desc from the Collector.
- descc := make(chan *prometheus.Desc, 1)
- c.Describe(descc)
-
- select {
- case desc = <-descc:
- default:
- panic("no description provided by collector")
- }
- select {
- case <-descc:
- panic("more than one description provided by collector")
- default:
- }
-
- close(descc)
-
- // Make sure the Collector has a valid Desc by registering it with a
- // temporary registry.
- prometheus.NewRegistry().MustRegister(c)
-
-	// Create a ConstMetric with the Desc. Since we don't know how many
-	// variable labels there are, keep adding label values until it works.
- for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
- m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
- }
-
- // Write out the metric into a proto message and look at the labels.
- // If the value is not the magicString, it is a constLabel, which doesn't interest us.
- // If the label is curried, it doesn't interest us.
- // In all other cases, only "code" or "method" is allowed.
- if err := m.Write(&pm); err != nil {
- panic("error checking metric for labels")
- }
- for _, label := range pm.Label {
- name, value := label.GetName(), label.GetValue()
- if value != magicString || isLabelCurried(c, name) {
- continue
- }
- switch name {
- case "code":
- code = true
- case "method":
- method = true
- default:
- panic("metric partitioned with non-supported labels")
- }
- }
- return
-}
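-
-// A hedged illustration of what checkLabels accepts (opts is an assumed
-// prometheus.CounterOpts value):
-//
-//	prometheus.NewCounterVec(opts, []string{})                 // OK: unpartitioned
-//	prometheus.NewCounterVec(opts, []string{"code"})           // OK
-//	prometheus.NewCounterVec(opts, []string{"code", "method"}) // OK
-//	prometheus.NewCounterVec(opts, []string{"handler"})        // rejected: checkLabels panics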
-
-func isLabelCurried(c prometheus.Collector, label string) bool {
- // This is even hackier than the label test above.
- // We essentially try to curry again and see if it works.
- // But for that, we need to type-convert to the two
- // types we use here, ObserverVec or *CounterVec.
- switch v := c.(type) {
- case *prometheus.CounterVec:
- if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
- return false
- }
- case prometheus.ObserverVec:
- if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
- return false
- }
- default:
- panic("unsupported metric vec type")
- }
- return true
-}
-
-func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
- labels := prometheus.Labels{}
-
- if !(code || method) {
- return labels
- }
-
- if code {
- labels["code"] = sanitizeCode(status)
- }
- if method {
- labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
- }
-
- return labels
-}
-
-func computeApproximateRequestSize(r *http.Request) int {
- s := 0
- if r.URL != nil {
- s += len(r.URL.String())
- }
-
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
- }
- s += len(r.Host)
-
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
-
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- return s
-}
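-
-// A hedged worked example of the approximation: for "GET /metrics HTTP/1.1"
-// with Host "x", no headers, and no body, the size is
-// len("/metrics") + len("GET") + len("HTTP/1.1") + len("x") = 8 + 3 + 8 + 1 = 20.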
-
-// sanitizeMethod returns the lower-cased method if it is in the known method
-// list, and "unknown" otherwise. The known method list can be extended as
-// needed via the extraMethods parameter.
-func sanitizeMethod(m string, extraMethods ...string) string {
- // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
- // the methods chosen as default.
- switch m {
- case "GET", "get":
- return "get"
- case "PUT", "put":
- return "put"
- case "HEAD", "head":
- return "head"
- case "POST", "post":
- return "post"
- case "DELETE", "delete":
- return "delete"
- case "CONNECT", "connect":
- return "connect"
- case "OPTIONS", "options":
- return "options"
- case "NOTIFY", "notify":
- return "notify"
- case "TRACE", "trace":
- return "trace"
- case "PATCH", "patch":
- return "patch"
- default:
- for _, method := range extraMethods {
- if strings.EqualFold(m, method) {
- return strings.ToLower(m)
- }
- }
- return "unknown"
- }
-}
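-
-// Hedged examples of the behavior described above:
-//
-//	sanitizeMethod("GET")                  // "get"
-//	sanitizeMethod("PROPFIND")             // "unknown"
-//	sanitizeMethod("PROPFIND", "PROPFIND") // "propfind"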
-
-// sanitizeCode returns the status code as a string. If the wrapped
-// http.Handler has not set a status code (i.e. the value is still 0), it
-// returns "200", for consistency with the stdlib's behavior.
-func sanitizeCode(s int) string {
-	// See https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml for accepted codes.
- switch s {
- case 100:
- return "100"
- case 101:
- return "101"
-
- case 200, 0:
- return "200"
- case 201:
- return "201"
- case 202:
- return "202"
- case 203:
- return "203"
- case 204:
- return "204"
- case 205:
- return "205"
- case 206:
- return "206"
-
- case 300:
- return "300"
- case 301:
- return "301"
- case 302:
- return "302"
- case 304:
- return "304"
- case 305:
- return "305"
- case 307:
- return "307"
-
- case 400:
- return "400"
- case 401:
- return "401"
- case 402:
- return "402"
- case 403:
- return "403"
- case 404:
- return "404"
- case 405:
- return "405"
- case 406:
- return "406"
- case 407:
- return "407"
- case 408:
- return "408"
- case 409:
- return "409"
- case 410:
- return "410"
- case 411:
- return "411"
- case 412:
- return "412"
- case 413:
- return "413"
- case 414:
- return "414"
- case 415:
- return "415"
- case 416:
- return "416"
- case 417:
- return "417"
- case 418:
- return "418"
-
- case 500:
- return "500"
- case 501:
- return "501"
- case 502:
- return "502"
- case 503:
- return "503"
- case 504:
- return "504"
- case 505:
- return "505"
-
- case 428:
- return "428"
- case 429:
- return "429"
- case 431:
- return "431"
- case 511:
- return "511"
-
- default:
- if s >= 100 && s <= 599 {
- return strconv.Itoa(s)
- }
- return "unknown"
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
deleted file mode 100644
index 5d4383aa1..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "context"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// Option is used to configure a handler (middleware) or a round tripper.
-type Option interface {
- apply(*options)
-}
-
-// LabelValueFromCtx is used to compute a label value from the request
-// context. The context can be populated with request values via middleware.
-type LabelValueFromCtx func(ctx context.Context) string
-
-// options stores options for both handlers and round trippers.
-type options struct {
- extraMethods []string
- getExemplarFn func(requestCtx context.Context) prometheus.Labels
- extraLabelsFromCtx map[string]LabelValueFromCtx
-}
-
-func defaultOptions() *options {
- return &options{
- getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
- extraLabelsFromCtx: map[string]LabelValueFromCtx{},
- }
-}
-
-func (o *options) emptyDynamicLabels() prometheus.Labels {
- labels := prometheus.Labels{}
-
- for label := range o.extraLabelsFromCtx {
- labels[label] = ""
- }
-
- return labels
-}
-
-type optionApplyFunc func(*options)
-
-func (o optionApplyFunc) apply(opt *options) { o(opt) }
-
-// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
-// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
-//
-// See ExampleInstrumentHandlerWithExtraMethods for example usage.
-func WithExtraMethods(methods ...string) Option {
- return optionApplyFunc(func(o *options) {
- o.extraMethods = methods
- })
-}
-
-// WithExemplarFromContext allows injecting a function that extracts exemplar
-// labels from the request context; the exemplar is attached to counter and
-// histogram metrics. If the function returns nil labels or the metric does
-// not support exemplars, no exemplar is added (noop), but the metric still
-// observes/increments as usual.
-func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
- return optionApplyFunc(func(o *options) {
- o.getExemplarFn = getExemplarFn
- })
-}
-
-// WithLabelFromCtx registers a label whose value is resolved dynamically from
-// the request context.
-// See ExampleInstrumentHandlerWithLabelResolver for example usage.
-func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
- return optionApplyFunc(func(o *options) {
- o.extraLabelsFromCtx[name] = valueFn
- })
-}
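-
-// A hedged sketch combining the options above. The names reqs, next,
-// traceIDFromCtx, and tenantFromCtx are assumptions; for WithLabelFromCtx,
-// the wrapped vec must declare the dynamic label ("tenant") in addition to
-// any "code"/"method" labels.
-//
-//	handler := InstrumentHandlerCounter(reqs, next,
-//		WithExtraMethods("PROPFIND"),
-//		WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
-//			return prometheus.Labels{"trace_id": traceIDFromCtx(ctx)}
-//		}),
-//		WithLabelFromCtx("tenant", tenantFromCtx),
-//	)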
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
deleted file mode 100644
index c6fd2f58b..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ /dev/null
@@ -1,1076 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "bytes"
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "unicode/utf8"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- "github.com/cespare/xxhash/v2"
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/expfmt"
- "google.golang.org/protobuf/proto"
-)
-
-const (
- // Capacity for the channel to collect metrics and descriptors.
- capMetricChan = 1000
- capDescChan = 10
-)
-
-// DefaultRegisterer and DefaultGatherer are the implementations of the
-// Registerer and Gatherer interface a number of convenience functions in this
-// package act on. Initially, both variables point to the same Registry, which
-// has a process collector (currently on Linux only, see NewProcessCollector)
-// and a Go collector (see NewGoCollector, in particular the note about
-// stop-the-world implication with Go versions older than 1.9) already
-// registered. This approach to keep default instances as global state mirrors
-// the approach of other packages in the Go standard library. Note that there
-// are caveats. Change the variables with caution and only if you understand the
-// consequences. Users who want to avoid global state altogether should not use
-// the convenience functions and act on custom instances instead.
-var (
- defaultRegistry = NewRegistry()
- DefaultRegisterer Registerer = defaultRegistry
- DefaultGatherer Gatherer = defaultRegistry
-)
-
-func init() {
- MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
- MustRegister(NewGoCollector())
-}
-
-// NewRegistry creates a new vanilla Registry without any Collectors
-// pre-registered.
-func NewRegistry() *Registry {
- return &Registry{
- collectorsByID: map[uint64]Collector{},
- descIDs: map[uint64]struct{}{},
- dimHashesByName: map[string]uint64{},
- }
-}
-
-// NewPedanticRegistry returns a registry that checks during collection if each
-// collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry. Unchecked Collectors (those whose
-// Describe method does not yield any descriptors) are excluded from the check.
-//
-// Usually, a Registry will be happy as long as the union of all collected
-// Metrics is consistent and valid even if some metrics are not consistent with
-// their own Desc or a Desc provided by their registered Collector. Well-behaved
-// Collectors and Metrics will only provide consistent Descs. This Registry is
-// useful to test the implementation of Collectors and Metrics.
-func NewPedanticRegistry() *Registry {
- r := NewRegistry()
- r.pedanticChecksEnabled = true
- return r
-}
-
-// Registerer is the interface for the part of a registry in charge of
-// registering and unregistering. Users of custom registries should use
-// Registerer as type for registration purposes (rather than the Registry type
-// directly). In that way, they are free to use custom Registerer implementation
-// (e.g. for testing purposes).
-type Registerer interface {
- // Register registers a new Collector to be included in metrics
- // collection. It returns an error if the descriptors provided by the
- // Collector are invalid or if they — in combination with descriptors of
- // already registered Collectors — do not fulfill the consistency and
- // uniqueness criteria described in the documentation of metric.Desc.
- //
- // If the provided Collector is equal to a Collector already registered
- // (which includes the case of re-registering the same Collector), the
- // returned error is an instance of AlreadyRegisteredError, which
- // contains the previously registered Collector.
- //
- // A Collector whose Describe method does not yield any Desc is treated
- // as unchecked. Registration will always succeed. No check for
- // re-registering (see previous paragraph) is performed. Thus, the
- // caller is responsible for not double-registering the same unchecked
- // Collector, and for providing a Collector that will not cause
- // inconsistent metrics on collection. (This would lead to scrape
- // errors.)
- Register(Collector) error
- // MustRegister works like Register but registers any number of
- // Collectors and panics upon the first registration that causes an
- // error.
- MustRegister(...Collector)
- // Unregister unregisters the Collector that equals the Collector passed
- // in as an argument. (Two Collectors are considered equal if their
- // Describe method yields the same set of descriptors.) The function
- // returns whether a Collector was unregistered. Note that an unchecked
- // Collector cannot be unregistered (as its Describe method does not
- // yield any descriptor).
- //
- // Note that even after unregistering, it will not be possible to
- // register a new Collector that is inconsistent with the unregistered
- // Collector, e.g. a Collector collecting metrics with the same name but
- // a different help string. The rationale here is that the same registry
- // instance must only collect consistent metrics throughout its
- // lifetime.
- Unregister(Collector) bool
-}
-
-// Gatherer is the interface for the part of a registry in charge of gathering
-// the collected metrics into a number of MetricFamilies. The Gatherer interface
-// comes with the same general implication as described for the Registerer
-// interface.
-type Gatherer interface {
- // Gather calls the Collect method of the registered Collectors and then
- // gathers the collected metrics into a lexicographically sorted slice
- // of uniquely named MetricFamily protobufs. Gather ensures that the
- // returned slice is valid and self-consistent so that it can be used
- // for valid exposition. As an exception to the strict consistency
- // requirements described for metric.Desc, Gather will tolerate
- // different sets of label names for metrics of the same metric family.
- //
- // Even if an error occurs, Gather attempts to gather as many metrics as
- // possible. Hence, if a non-nil error is returned, the returned
- // MetricFamily slice could be nil (in case of a fatal error that
- // prevented any meaningful metric collection) or contain a number of
- // MetricFamily protobufs, some of which might be incomplete, and some
- // might be missing altogether. The returned error (which might be a
- // MultiError) explains the details. Note that this is mostly useful for
- // debugging purposes. If the gathered protobufs are to be used for
- // exposition in actual monitoring, it is almost always better to not
- // expose an incomplete result and instead disregard the returned
- // MetricFamily protobufs in case the returned error is non-nil.
- Gather() ([]*dto.MetricFamily, error)
-}
-
-// Register registers the provided Collector with the DefaultRegisterer.
-//
-// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
-// details.
-func Register(c Collector) error {
- return DefaultRegisterer.Register(c)
-}
-
-// MustRegister registers the provided Collectors with the DefaultRegisterer and
-// panics if any error occurs.
-//
-// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
-// there for more details.
-func MustRegister(cs ...Collector) {
- DefaultRegisterer.MustRegister(cs...)
-}
-
-// Unregister removes the registration of the provided Collector from the
-// DefaultRegisterer.
-//
-// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
-// more details.
-func Unregister(c Collector) bool {
- return DefaultRegisterer.Unregister(c)
-}
-
-// GathererFunc turns a function into a Gatherer.
-type GathererFunc func() ([]*dto.MetricFamily, error)
-
-// Gather implements Gatherer.
-func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
- return gf()
-}
-
-// AlreadyRegisteredError is returned by the Register method if the Collector to
-// be registered has already been registered before, or a different Collector
-// that collects the same metrics has been registered before. Registration fails
-// in that case, but you can detect from the kind of error what has
-// happened. The error contains fields for the existing Collector and the
-// (rejected) new Collector that equals the existing one. This can be used to
-// find out if an equal Collector has been registered before and switch over to
-// using the old one, as demonstrated in the example.
-type AlreadyRegisteredError struct {
- ExistingCollector, NewCollector Collector
-}
-
-func (err AlreadyRegisteredError) Error() string {
- return "duplicate metrics collector registration attempted"
-}
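-
-// A hedged sketch of the pattern the comment above alludes to: reuse the
-// previously registered Collector when registration reports a duplicate
-// (opts is assumed).
-//
-//	c := NewCounter(opts)
-//	if err := Register(c); err != nil {
-//		var are AlreadyRegisteredError
-//		if errors.As(err, &are) {
-//			c = are.ExistingCollector.(Counter)
-//		} else {
-//			panic(err)
-//		}
-//	}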
-
-// MultiError is a slice of errors implementing the error interface. It is used
-// by a Gatherer to report multiple errors during MetricFamily gathering.
-type MultiError []error
-
-// Error formats the contained errors as a bullet point list, preceded by the
-// total number of errors. Note that this results in a multi-line string.
-func (errs MultiError) Error() string {
- if len(errs) == 0 {
- return ""
- }
- buf := &bytes.Buffer{}
- fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
- for _, err := range errs {
- fmt.Fprintf(buf, "\n* %s", err)
- }
- return buf.String()
-}
-
-// Append appends the provided error if it is not nil.
-func (errs *MultiError) Append(err error) {
- if err != nil {
- *errs = append(*errs, err)
- }
-}
-
-// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
-// contained error as error if len(errs) is 1. In all other cases, it returns
-// the MultiError directly. This is helpful for returning a MultiError in a way
-// that only uses the MultiError if needed.
-func (errs MultiError) MaybeUnwrap() error {
- switch len(errs) {
- case 0:
- return nil
- case 1:
- return errs[0]
- default:
- return errs
- }
-}
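-
-// A hedged illustration of the Append/MaybeUnwrap pattern (stepA and stepB
-// are assumed functions returning error):
-//
-//	var errs MultiError
-//	errs.Append(stepA()) // nil errors are dropped
-//	errs.Append(stepB())
-//	return errs.MaybeUnwrap() // nil, the sole error, or the MultiError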
-
-// Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
-// and Collector. The zero value is not usable. Create instances with
-// NewRegistry or NewPedanticRegistry.
-//
-// Registry implements Collector to allow it to be used for creating groups of
-// metrics. See the Grouping example for how this can be done.
-type Registry struct {
- mtx sync.RWMutex
- collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
- descIDs map[uint64]struct{}
- dimHashesByName map[string]uint64
- uncheckedCollectors []Collector
- pedanticChecksEnabled bool
-}
-
-// Register implements Registerer.
-func (r *Registry) Register(c Collector) error {
- var (
- descChan = make(chan *Desc, capDescChan)
- newDescIDs = map[uint64]struct{}{}
- newDimHashesByName = map[string]uint64{}
- collectorID uint64 // All desc IDs XOR'd together.
- duplicateDescErr error
- )
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
- r.mtx.Lock()
- defer func() {
- // Drain channel in case of premature return to not leak a goroutine.
- for range descChan {
- }
- r.mtx.Unlock()
- }()
- // Conduct various tests...
- for desc := range descChan {
-
- // Is the descriptor valid at all?
- if desc.err != nil {
- return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
- }
-
- // Is the descID unique?
- // (In other words: Is the fqName + constLabel combination unique?)
- if _, exists := r.descIDs[desc.id]; exists {
- duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
- }
- // If it is not a duplicate desc in this collector, XOR it to
- // the collectorID. (We allow duplicate descs within the same
- // collector, but their existence must be a no-op.)
- if _, exists := newDescIDs[desc.id]; !exists {
- newDescIDs[desc.id] = struct{}{}
- collectorID ^= desc.id
- }
-
- // Are all the label names and the help string consistent with
- // previous descriptors of the same name?
- // First check existing descriptors...
- if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
- }
- continue
- }
-
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- continue
- }
- newDimHashesByName[desc.fqName] = desc.dimHash
- }
- // A Collector yielding no Desc at all is considered unchecked.
- if len(newDescIDs) == 0 {
- r.uncheckedCollectors = append(r.uncheckedCollectors, c)
- return nil
- }
- if existing, exists := r.collectorsByID[collectorID]; exists {
- switch e := existing.(type) {
- case *wrappingCollector:
- return AlreadyRegisteredError{
- ExistingCollector: e.unwrapRecursively(),
- NewCollector: c,
- }
- default:
- return AlreadyRegisteredError{
- ExistingCollector: e,
- NewCollector: c,
- }
- }
- }
- // If the collectorID is new, but at least one of the descs existed
- // before, we are in trouble.
- if duplicateDescErr != nil {
- return duplicateDescErr
- }
-
- // Only after all tests have passed, actually register.
- r.collectorsByID[collectorID] = c
- for hash := range newDescIDs {
- r.descIDs[hash] = struct{}{}
- }
- for name, dimHash := range newDimHashesByName {
- r.dimHashesByName[name] = dimHash
- }
- return nil
-}
-
-// Unregister implements Registerer.
-func (r *Registry) Unregister(c Collector) bool {
- var (
- descChan = make(chan *Desc, capDescChan)
- descIDs = map[uint64]struct{}{}
- collectorID uint64 // All desc IDs XOR'd together.
- )
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
- for desc := range descChan {
- if _, exists := descIDs[desc.id]; !exists {
- collectorID ^= desc.id
- descIDs[desc.id] = struct{}{}
- }
- }
-
- r.mtx.RLock()
- if _, exists := r.collectorsByID[collectorID]; !exists {
- r.mtx.RUnlock()
- return false
- }
- r.mtx.RUnlock()
-
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- delete(r.collectorsByID, collectorID)
- for id := range descIDs {
- delete(r.descIDs, id)
- }
- // dimHashesByName is left untouched as those must be consistent
- // throughout the lifetime of a program.
- return true
-}
-
-// MustRegister implements Registerer.
-func (r *Registry) MustRegister(cs ...Collector) {
- for _, c := range cs {
- if err := r.Register(c); err != nil {
- panic(err)
- }
- }
-}
-
-// Gather implements Gatherer.
-func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
- r.mtx.RLock()
-
- if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
- // Fast path.
- r.mtx.RUnlock()
- return nil, nil
- }
-
- var (
- checkedMetricChan = make(chan Metric, capMetricChan)
- uncheckedMetricChan = make(chan Metric, capMetricChan)
- metricHashes = map[uint64]struct{}{}
- wg sync.WaitGroup
- errs MultiError // The collected errors to return in the end.
- registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
- )
-
- goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
- metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
- checkedCollectors := make(chan Collector, len(r.collectorsByID))
- uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
- for _, collector := range r.collectorsByID {
- checkedCollectors <- collector
- }
- for _, collector := range r.uncheckedCollectors {
- uncheckedCollectors <- collector
- }
- // In case pedantic checks are enabled, we have to copy the map before
- // giving up the RLock.
- if r.pedanticChecksEnabled {
- registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
- for id := range r.descIDs {
- registeredDescIDs[id] = struct{}{}
- }
- }
- r.mtx.RUnlock()
-
- wg.Add(goroutineBudget)
-
- collectWorker := func() {
- for {
- select {
- case collector := <-checkedCollectors:
- collector.Collect(checkedMetricChan)
- case collector := <-uncheckedCollectors:
- collector.Collect(uncheckedMetricChan)
- default:
- return
- }
- wg.Done()
- }
- }
-
- // Start the first worker now to make sure at least one is running.
- go collectWorker()
- goroutineBudget--
-
- // Close checkedMetricChan and uncheckedMetricChan once all collectors
- // are collected.
- go func() {
- wg.Wait()
- close(checkedMetricChan)
- close(uncheckedMetricChan)
- }()
-
- // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
- defer func() {
- if checkedMetricChan != nil {
- for range checkedMetricChan {
- }
- }
- if uncheckedMetricChan != nil {
- for range uncheckedMetricChan {
- }
- }
- }()
-
- // Copy the channel references so we can nil them out later to remove
- // them from the select statements below.
- cmc := checkedMetricChan
- umc := uncheckedMetricChan
-
- for {
- select {
- case metric, ok := <-cmc:
- if !ok {
- cmc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- registeredDescIDs,
- ))
- case metric, ok := <-umc:
- if !ok {
- umc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- nil,
- ))
- default:
- if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
- // All collectors are already being worked on or
-				// we have already started as many goroutines as
-				// there are collectors. Do the same as above,
- // just without the default.
- select {
- case metric, ok := <-cmc:
- if !ok {
- cmc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- registeredDescIDs,
- ))
- case metric, ok := <-umc:
- if !ok {
- umc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- nil,
- ))
- }
- break
- }
- // Start more workers.
- go collectWorker()
- goroutineBudget--
- runtime.Gosched()
- }
- // Once both checkedMetricChan and uncheckedMetricChan are closed
- // and drained, the contraption above will nil out cmc and umc,
- // and then we can leave the collect loop here.
- if cmc == nil && umc == nil {
- break
- }
- }
- return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
-
-// Describe implements Collector.
-func (r *Registry) Describe(ch chan<- *Desc) {
- r.mtx.RLock()
- defer r.mtx.RUnlock()
-
- // Only report the checked Collectors; unchecked collectors don't report any
- // Desc.
- for _, c := range r.collectorsByID {
- c.Describe(ch)
- }
-}
-
-// Collect implements Collector.
-func (r *Registry) Collect(ch chan<- Metric) {
- r.mtx.RLock()
- defer r.mtx.RUnlock()
-
- for _, c := range r.collectorsByID {
- c.Collect(ch)
- }
- for _, c := range r.uncheckedCollectors {
- c.Collect(ch)
- }
-}
-
-// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
-// Prometheus text format, and writes it to a temporary file. Upon success, the
-// temporary file is renamed to the provided filename.
-//
-// This is intended for use with the textfile collector of the node exporter.
-// Note that the node exporter expects the filename to be suffixed with ".prom".
-func WriteToTextfile(filename string, g Gatherer) error {
- tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
- if err != nil {
- return err
- }
- defer os.Remove(tmp.Name())
-
- mfs, err := g.Gather()
- if err != nil {
- return err
- }
- for _, mf := range mfs {
- if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
- return err
- }
- }
- if err := tmp.Close(); err != nil {
- return err
- }
-
- if err := os.Chmod(tmp.Name(), 0o644); err != nil {
- return err
- }
- return os.Rename(tmp.Name(), filename)
-}
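-
-// A hedged usage sketch for the textfile-collector workflow (the path and
-// myCollector are illustrative):
-//
-//	reg := NewRegistry()
-//	reg.MustRegister(myCollector)
-//	if err := WriteToTextfile("/var/lib/node_exporter/textfile/app.prom", reg); err != nil {
-//		// handle the error
-//	}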
-
-// processMetric is an internal helper method only used by the Gather method.
-func processMetric(
- metric Metric,
- metricFamiliesByName map[string]*dto.MetricFamily,
- metricHashes map[uint64]struct{},
- registeredDescIDs map[uint64]struct{},
-) error {
- desc := metric.Desc()
- // Wrapped metrics collected by an unchecked Collector can have an
- // invalid Desc.
- if desc.err != nil {
- return desc.err
- }
- dtoMetric := &dto.Metric{}
- if err := metric.Write(dtoMetric); err != nil {
- return fmt.Errorf("error collecting metric %v: %w", desc, err)
- }
- metricFamily, ok := metricFamiliesByName[desc.fqName]
- if ok { // Existing name.
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
- )
- }
- // TODO(beorn7): Simplify switch once Desc has type.
- switch metricFamily.GetType() {
- case dto.MetricType_COUNTER:
- if dtoMetric.Counter == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Counter",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_GAUGE:
- if dtoMetric.Gauge == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Gauge",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_SUMMARY:
- if dtoMetric.Summary == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Summary",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_UNTYPED:
- if dtoMetric.Untyped == nil {
- return fmt.Errorf(
- "collected metric %s %s should be Untyped",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_HISTOGRAM:
- if dtoMetric.Histogram == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Histogram",
- desc.fqName, dtoMetric,
- )
- }
- default:
- panic("encountered MetricFamily with invalid type")
- }
- } else { // New name.
- metricFamily = &dto.MetricFamily{}
- metricFamily.Name = proto.String(desc.fqName)
- metricFamily.Help = proto.String(desc.help)
- // TODO(beorn7): Simplify switch once Desc has type.
- switch {
- case dtoMetric.Gauge != nil:
- metricFamily.Type = dto.MetricType_GAUGE.Enum()
- case dtoMetric.Counter != nil:
- metricFamily.Type = dto.MetricType_COUNTER.Enum()
- case dtoMetric.Summary != nil:
- metricFamily.Type = dto.MetricType_SUMMARY.Enum()
- case dtoMetric.Untyped != nil:
- metricFamily.Type = dto.MetricType_UNTYPED.Enum()
- case dtoMetric.Histogram != nil:
- metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
- default:
- return fmt.Errorf("empty metric collected: %s", dtoMetric)
- }
- if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
- return err
- }
- metricFamiliesByName[desc.fqName] = metricFamily
- }
- if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
- return err
- }
- if registeredDescIDs != nil {
- // Is the desc registered at all?
- if _, exist := registeredDescIDs[desc.id]; !exist {
- return fmt.Errorf(
- "collected metric %s %s with unregistered descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
- return err
- }
- }
- metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
- return nil
-}
-
-// Gatherers is a slice of Gatherer instances that implements the Gatherer
-// interface itself. Its Gather method calls Gather on all Gatherers in the
-// slice in order and returns the merged results. Errors returned from the
-// Gather calls are all returned in a flattened MultiError. Duplicate and
-// inconsistent Metrics are skipped (first occurrence in slice order wins) and
-// reported in the returned error.
-//
-// Gatherers can be used to merge the Gather results from multiple
-// Registries. It also provides a way to directly inject existing MetricFamily
-// protobufs into the gathering by creating a custom Gatherer with a Gather
-// method that simply returns the existing MetricFamily protobufs. Note that no
-// registration is involved (in contrast to Collector registration), so
-// obviously registration-time checks cannot happen. Any inconsistencies between
-// the gathered MetricFamilies are reported as errors by the Gather method, and
-// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
-// (e.g. syntactically invalid metric or label names) will go undetected.
-type Gatherers []Gatherer
-
-// Gather implements Gatherer.
-func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
- var (
- metricFamiliesByName = map[string]*dto.MetricFamily{}
- metricHashes = map[uint64]struct{}{}
- errs MultiError // The collected errors to return in the end.
- )
-
- for i, g := range gs {
- mfs, err := g.Gather()
- if err != nil {
- multiErr := MultiError{}
- if errors.As(err, &multiErr) {
- for _, err := range multiErr {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
- }
- } else {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
- }
- }
- for _, mf := range mfs {
- existingMF, exists := metricFamiliesByName[mf.GetName()]
- if exists {
- if existingMF.GetHelp() != mf.GetHelp() {
- errs = append(errs, fmt.Errorf(
- "gathered metric family %s has help %q but should have %q",
- mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
- ))
- continue
- }
- if existingMF.GetType() != mf.GetType() {
- errs = append(errs, fmt.Errorf(
- "gathered metric family %s has type %s but should have %s",
- mf.GetName(), mf.GetType(), existingMF.GetType(),
- ))
- continue
- }
- } else {
- existingMF = &dto.MetricFamily{}
- existingMF.Name = mf.Name
- existingMF.Help = mf.Help
- existingMF.Type = mf.Type
- if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
- errs = append(errs, err)
- continue
- }
- metricFamiliesByName[mf.GetName()] = existingMF
- }
- for _, m := range mf.Metric {
- if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
- errs = append(errs, err)
- continue
- }
- existingMF.Metric = append(existingMF.Metric, m)
- }
- }
- }
- return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
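-
-// A hedged sketch of merging gather results (regA and regB are assumed
-// registries); on duplicates, the first occurrence in slice order wins:
-//
-//	merged := Gatherers{regA, regB, DefaultGatherer}
-//	mfs, err := merged.Gather()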
-
-// checkSuffixCollisions checks for collisions with the “magic” suffixes the
-// Prometheus text format and the internal metric representation of the
-// Prometheus server add while flattening Summaries and Histograms.
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
- var (
- newName = mf.GetName()
- newType = mf.GetType()
- newNameWithoutSuffix = ""
- )
- switch {
- case strings.HasSuffix(newName, "_count"):
- newNameWithoutSuffix = newName[:len(newName)-6]
- case strings.HasSuffix(newName, "_sum"):
- newNameWithoutSuffix = newName[:len(newName)-4]
- case strings.HasSuffix(newName, "_bucket"):
- newNameWithoutSuffix = newName[:len(newName)-7]
- }
- if newNameWithoutSuffix != "" {
- if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
- switch existingMF.GetType() {
- case dto.MetricType_SUMMARY:
- if !strings.HasSuffix(newName, "_bucket") {
- return fmt.Errorf(
- "collected metric named %q collides with previously collected summary named %q",
- newName, newNameWithoutSuffix,
- )
- }
- case dto.MetricType_HISTOGRAM:
- return fmt.Errorf(
- "collected metric named %q collides with previously collected histogram named %q",
- newName, newNameWithoutSuffix,
- )
- }
- }
- }
- if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
- if _, ok := mfs[newName+"_count"]; ok {
- return fmt.Errorf(
- "collected histogram or summary named %q collides with previously collected metric named %q",
- newName, newName+"_count",
- )
- }
- if _, ok := mfs[newName+"_sum"]; ok {
- return fmt.Errorf(
- "collected histogram or summary named %q collides with previously collected metric named %q",
- newName, newName+"_sum",
- )
- }
- }
- if newType == dto.MetricType_HISTOGRAM {
- if _, ok := mfs[newName+"_bucket"]; ok {
- return fmt.Errorf(
- "collected histogram named %q collides with previously collected metric named %q",
- newName, newName+"_bucket",
- )
- }
- }
- return nil
-}
-
-// checkMetricConsistency checks if the provided Metric is consistent with the
-// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
-// name. If the resulting hash is already in the provided metricHashes, an error
-// is returned. If not, it is added to metricHashes.
-func checkMetricConsistency(
- metricFamily *dto.MetricFamily,
- dtoMetric *dto.Metric,
- metricHashes map[uint64]struct{},
-) error {
- name := metricFamily.GetName()
-
- // Type consistency with metric family.
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
- metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
- metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
- metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
- metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
- return fmt.Errorf(
- "collected metric %q { %s} is not a %s",
- name, dtoMetric, metricFamily.GetType(),
- )
- }
-
- previousLabelName := ""
- for _, labelPair := range dtoMetric.GetLabel() {
- labelName := labelPair.GetName()
- if labelName == previousLabelName {
- return fmt.Errorf(
- "collected metric %q { %s} has two or more labels with the same name: %s",
- name, dtoMetric, labelName,
- )
- }
- if !checkLabelName(labelName) {
- return fmt.Errorf(
- "collected metric %q { %s} has a label with an invalid name: %s",
- name, dtoMetric, labelName,
- )
- }
- if dtoMetric.Summary != nil && labelName == quantileLabel {
- return fmt.Errorf(
- "collected metric %q { %s} must not have an explicit %q label",
- name, dtoMetric, quantileLabel,
- )
- }
- if !utf8.ValidString(labelPair.GetValue()) {
- return fmt.Errorf(
- "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
- name, dtoMetric, labelName, labelPair.GetValue())
- }
- previousLabelName = labelName
- }
-
- // Is the metric unique (i.e. no other metric with the same name and the same labels)?
- h := xxhash.New()
- h.WriteString(name)
- h.Write(separatorByteSlice)
- // Make sure label pairs are sorted. We depend on it for the consistency
- // check.
- if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
- // We cannot sort dtoMetric.Label in place as it is immutable by contract.
- copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
- copy(copiedLabels, dtoMetric.Label)
- sort.Sort(internal.LabelPairSorter(copiedLabels))
- dtoMetric.Label = copiedLabels
- }
- for _, lp := range dtoMetric.Label {
- h.WriteString(lp.GetName())
- h.Write(separatorByteSlice)
- h.WriteString(lp.GetValue())
- h.Write(separatorByteSlice)
- }
- if dtoMetric.TimestampMs != nil {
- h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
- h.Write(separatorByteSlice)
- }
- hSum := h.Sum64()
- if _, exists := metricHashes[hSum]; exists {
- return fmt.Errorf(
- "collected metric %q { %s} was collected before with the same name and label values",
- name, dtoMetric,
- )
- }
- metricHashes[hSum] = struct{}{}
- return nil
-}
-
-func checkDescConsistency(
- metricFamily *dto.MetricFamily,
- dtoMetric *dto.Metric,
- desc *Desc,
-) error {
- // Desc help consistency with metric family help.
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
- )
- }
-
- // Is the desc consistent with the content of the metric?
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
- copy(lpsFromDesc, desc.constLabelPairs)
- for _, l := range desc.variableLabels.names {
- lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
- Name: proto.String(l),
- })
- }
- if len(lpsFromDesc) != len(dtoMetric.Label) {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- sort.Sort(internal.LabelPairSorter(lpsFromDesc))
- for i, lpFromDesc := range lpsFromDesc {
- lpFromMetric := dtoMetric.Label[i]
- if lpFromDesc.GetName() != lpFromMetric.GetName() ||
- lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- }
- return nil
-}
-
-var _ TransactionalGatherer = &MultiTRegistry{}
-
-// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
-// transactional gatherers.
-//
-// It is the caller's responsibility to ensure that the registries have
-// mutually exclusive metric families; no deduplication is performed.
-type MultiTRegistry struct {
- tGatherers []TransactionalGatherer
-}
-
-// NewMultiTRegistry creates MultiTRegistry.
-func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
- return &MultiTRegistry{
- tGatherers: tGatherers,
- }
-}
-
-// Gather implements the TransactionalGatherer interface.
-func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
- errs := MultiError{}
-
- dFns := make([]func(), 0, len(r.tGatherers))
- // TODO(bwplotka): Implement concurrency for those?
- for _, g := range r.tGatherers {
- // TODO(bwplotka): Check for duplicates?
- m, d, err := g.Gather()
- errs.Append(err)
-
- mfs = append(mfs, m...)
- dFns = append(dFns, d)
- }
-
- // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
- sort.Slice(mfs, func(i, j int) bool {
- return *mfs[i].Name < *mfs[j].Name
- })
- return mfs, func() {
- for _, d := range dFns {
- d()
- }
- }, errs.MaybeUnwrap()
-}
-
-// TransactionalGatherer is a transactional gatherer that can be notified once
-// the memory holding the returned metric families is no longer used by the
-// caller. This allows implementations that cache.
-type TransactionalGatherer interface {
- // Gather returns metrics in a lexicographically sorted slice
- // of uniquely named MetricFamily protobufs. Gather ensures that the
- // returned slice is valid and self-consistent so that it can be used
- // for valid exposition. As an exception to the strict consistency
- // requirements described for metric.Desc, Gather will tolerate
- // different sets of label names for metrics of the same metric family.
- //
- // Even if an error occurs, Gather attempts to gather as many metrics as
- // possible. Hence, if a non-nil error is returned, the returned
- // MetricFamily slice could be nil (in case of a fatal error that
- // prevented any meaningful metric collection) or contain a number of
- // MetricFamily protobufs, some of which might be incomplete, and some
- // might be missing altogether. The returned error (which might be a
- // MultiError) explains the details. Note that this is mostly useful for
- // debugging purposes. If the gathered protobufs are to be used for
- // exposition in actual monitoring, it is almost always better to not
- // expose an incomplete result and instead disregard the returned
- // MetricFamily protobufs in case the returned error is non-nil.
- //
-	// Important: done is expected to be called (even if an error occurs!)
-	// once the caller no longer needs the returned slice of dto.MetricFamily.
- Gather() (_ []*dto.MetricFamily, done func(), err error)
-}
-
-// ToTransactionalGatherer wraps a Gatherer in a TransactionalGatherer whose done function is a noop.
-func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
- return &noTransactionGatherer{g: g}
-}
-
-type noTransactionGatherer struct {
- g Gatherer
-}
-
-// Gather implements the TransactionalGatherer interface.
-func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
- mfs, err := g.g.Gather()
- return mfs, func() {}, err
-}
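-
-// A hedged sketch tying the transactional pieces together (regA and
-// cachedGatherer are assumed):
-//
-//	tg := NewMultiTRegistry(
-//		ToTransactionalGatherer(regA),
-//		cachedGatherer, // some TransactionalGatherer implementation
-//	)
-//	mfs, done, err := tg.Gather()
-//	defer done() // must be called even when err != nil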
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
deleted file mode 100644
index 1ab0e4796..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ /dev/null
@@ -1,827 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/beorn7/perks/quantile"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// quantileLabel is used for the label that defines the quantile in a
-// summary.
-const quantileLabel = "quantile"
-
-// A Summary captures individual observations from an event or sample stream and
-// summarizes them in a manner similar to traditional summary statistics: 1. sum
-// of observations, 2. observation count, 3. rank estimations.
-//
-// A typical use-case is the observation of request latencies. By default, a
-// Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations. However, the default behavior will change in the
-// upcoming v1.0.0 of the library. There will be no rank estimations at all by
-// default. For a sane transition, it is recommended to set the desired rank
-// estimations explicitly.
-//
-// Note that the rank estimations cannot be aggregated in a meaningful way with
-// the Prometheus query language (i.e. you cannot average or add them). If you
-// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
-// queries served across all instances of a service), consider the Histogram
-// metric type. See the Prometheus documentation for more details.
-//
-// To create Summary instances, use NewSummary.
-type Summary interface {
- Metric
- Collector
-
- // Observe adds a single observation to the summary. Observations are
- // usually positive or zero. Negative observations are accepted but
- // prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. See
- // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
- // for details.
- Observe(float64)
-}
-
-var errQuantileLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in summaries", quantileLabel,
-)
-
-// Default values for SummaryOpts.
-const (
- // DefMaxAge is the default duration for which observations stay
- // relevant.
- DefMaxAge time.Duration = 10 * time.Minute
- // DefAgeBuckets is the default number of buckets used to calculate the
- // age of observations.
- DefAgeBuckets = 5
- // DefBufCap is the standard buffer size for collecting Summary observations.
- DefBufCap = 500
-)
-
-// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name to a non-empty string. While all other fields are
-// optional and can safely be left at their zero value, it is recommended to set
-// a help string and to explicitly set the Objectives field to the desired value
-// as the default value will change in the upcoming v1.0.0 of the library.
-type SummaryOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Summary (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Summary must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Summary.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // Due to the way a Summary is represented in the Prometheus text format
- // and how it is handled by the Prometheus server internally, “quantile”
- // is an illegal label name. Construction of a Summary or SummaryVec
- // will panic if this label name is used in ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
- ConstLabels Labels
-
- // Objectives defines the quantile rank estimates with their respective
- // absolute error. If Objectives[q] = e, then the value reported for q
- // will be the φ-quantile value for some φ between q-e and q+e. The
- // default value is an empty map, resulting in a summary without
- // quantiles.
- Objectives map[float64]float64
-
- // MaxAge defines the duration for which an observation stays relevant
- // for the summary. Only applies to pre-calculated quantiles, does not
- // apply to _sum and _count. Must be positive. The default value is
- // DefMaxAge.
- MaxAge time.Duration
-
- // AgeBuckets is the number of buckets used to exclude observations that
- // are older than MaxAge from the summary. A higher number has a
- // resource penalty, so only increase it if the higher resolution is
- // really required. For very high observation rates, you might want to
- // reduce the number of age buckets. With only one age bucket, you will
- // effectively see a complete reset of the summary each time MaxAge has
- // passed. The default value is DefAgeBuckets.
- AgeBuckets uint32
-
-	// BufCap defines the default sample stream buffer size. The default
-	// value of DefBufCap should suffice for most uses. If there is a need
-	// to increase the value, a multiple of 500 is recommended (because that
-	// is the internal buffer size of the underlying package
-	// "github.com/beorn7/perks/quantile").
- BufCap uint32
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-}
-
-// SummaryVecOpts bundles the options to create a SummaryVec metric.
-// It is mandatory to set SummaryOpts; see there for mandatory fields. VariableLabels
-// is optional and can safely be left at its default value.
-type SummaryVecOpts struct {
- SummaryOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// A problem with the sliding-window decay algorithm: the Merge method of
-// perks/quantile is actually not working as advertised - and it might be
-// unfixable, as the underlying algorithm is apparently not capable of merging
-// summaries in the first place. To avoid using Merge, we are currently adding
-// observations to _each_ age bucket, i.e. the effort to add a sample is
-// essentially multiplied by the number of age buckets. When rotating age
-// buckets, we empty the previous head stream. At scrape time, we simply take
-// the quantiles from the head stream (no merging required). Result: More effort
-// at observation time, less effort at scrape time, which is exactly the
-// opposite of what we try to accomplish, but at least the results are correct.
-//
-// The quite elegant previous contraption to merge the age buckets efficiently
-// at scrape time (see the code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
-// can't be used anymore.
-
-// NewSummary creates a new Summary based on the provided SummaryOpts.
-func NewSummary(opts SummaryOpts) Summary {
- return newSummary(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
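
For illustration, a summary with explicit objectives, as the doc comment above recommends (metric name, help, and error bounds are arbitrary):

	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "rpc_duration_seconds",
		Help:       "RPC latency distribution.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	latency.Observe(0.137) // The φ-quantiles are estimated from observations like this.
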
-
-func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
- if len(desc.variableLabels.names) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
- }
-
- for _, n := range desc.variableLabels.names {
- if n == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
-
- if opts.Objectives == nil {
- opts.Objectives = map[float64]float64{}
- }
-
- if opts.MaxAge < 0 {
- panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
- }
- if opts.MaxAge == 0 {
- opts.MaxAge = DefMaxAge
- }
-
- if opts.AgeBuckets == 0 {
- opts.AgeBuckets = DefAgeBuckets
- }
-
- if opts.BufCap == 0 {
- opts.BufCap = DefBufCap
- }
-
- if opts.now == nil {
- opts.now = time.Now
- }
- if len(opts.Objectives) == 0 {
- // Use the lock-free implementation of a Summary without objectives.
- s := &noObjectivesSummary{
- desc: desc,
- labelPairs: MakeLabelPairs(desc, labelValues),
- counts: [2]*summaryCounts{{}, {}},
- }
- s.init(s) // Init self-collection.
- s.createdTs = timestamppb.New(opts.now())
- return s
- }
-
- s := &summary{
- desc: desc,
-
- objectives: opts.Objectives,
- sortedObjectives: make([]float64, 0, len(opts.Objectives)),
-
- labelPairs: MakeLabelPairs(desc, labelValues),
-
- hotBuf: make([]float64, 0, opts.BufCap),
- coldBuf: make([]float64, 0, opts.BufCap),
- streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
- }
- s.headStreamExpTime = opts.now().Add(s.streamDuration)
- s.hotBufExpTime = s.headStreamExpTime
-
- for i := uint32(0); i < opts.AgeBuckets; i++ {
- s.streams = append(s.streams, s.newStream())
- }
- s.headStream = s.streams[0]
-
- for qu := range s.objectives {
- s.sortedObjectives = append(s.sortedObjectives, qu)
- }
- sort.Float64s(s.sortedObjectives)
-
- s.init(s) // Init self-collection.
- s.createdTs = timestamppb.New(opts.now())
- return s
-}
-
-type summary struct {
- selfCollector
-
- bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
- mtx sync.Mutex // Protects every other moving part.
- // Lock bufMtx before mtx if both are needed.
-
- desc *Desc
-
- objectives map[float64]float64
- sortedObjectives []float64
-
- labelPairs []*dto.LabelPair
-
- sum float64
- cnt uint64
-
- hotBuf, coldBuf []float64
-
- streams []*quantile.Stream
- streamDuration time.Duration
- headStream *quantile.Stream
- headStreamIdx int
- headStreamExpTime, hotBufExpTime time.Time
-
- createdTs *timestamppb.Timestamp
-}
-
-func (s *summary) Desc() *Desc {
- return s.desc
-}
-
-func (s *summary) Observe(v float64) {
- s.bufMtx.Lock()
- defer s.bufMtx.Unlock()
-
- now := time.Now()
- if now.After(s.hotBufExpTime) {
- s.asyncFlush(now)
- }
- s.hotBuf = append(s.hotBuf, v)
- if len(s.hotBuf) == cap(s.hotBuf) {
- s.asyncFlush(now)
- }
-}
-
-func (s *summary) Write(out *dto.Metric) error {
- sum := &dto.Summary{
- CreatedTimestamp: s.createdTs,
- }
- qs := make([]*dto.Quantile, 0, len(s.objectives))
-
- s.bufMtx.Lock()
- s.mtx.Lock()
- // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
- s.swapBufs(time.Now())
- s.bufMtx.Unlock()
-
- s.flushColdBuf()
- sum.SampleCount = proto.Uint64(s.cnt)
- sum.SampleSum = proto.Float64(s.sum)
-
- for _, rank := range s.sortedObjectives {
- var q float64
- if s.headStream.Count() == 0 {
- q = math.NaN()
- } else {
- q = s.headStream.Query(rank)
- }
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- s.mtx.Unlock()
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
- return nil
-}
-
-func (s *summary) newStream() *quantile.Stream {
- return quantile.NewTargeted(s.objectives)
-}
-
-// asyncFlush needs bufMtx locked.
-func (s *summary) asyncFlush(now time.Time) {
- s.mtx.Lock()
- s.swapBufs(now)
-
- // Unblock the original goroutine that was responsible for the mutation
- // that triggered the compaction. But hold onto the global non-buffer
- // state mutex until the operation finishes.
- go func() {
- s.flushColdBuf()
- s.mtx.Unlock()
- }()
-}
-
-// maybeRotateStreams needs mtx AND bufMtx locked.
-func (s *summary) maybeRotateStreams() {
- for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
- s.headStream.Reset()
- s.headStreamIdx++
- if s.headStreamIdx >= len(s.streams) {
- s.headStreamIdx = 0
- }
- s.headStream = s.streams[s.headStreamIdx]
- s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
- }
-}
-
-// flushColdBuf needs mtx locked.
-func (s *summary) flushColdBuf() {
- for _, v := range s.coldBuf {
- for _, stream := range s.streams {
- stream.Insert(v)
- }
- s.cnt++
- s.sum += v
- }
- s.coldBuf = s.coldBuf[0:0]
- s.maybeRotateStreams()
-}
-
-// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
-func (s *summary) swapBufs(now time.Time) {
- if len(s.coldBuf) != 0 {
- panic("coldBuf is not empty")
- }
- s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
- // hotBuf is now empty and gets new expiration set.
- for now.After(s.hotBufExpTime) {
- s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
- }
-}
-
-type summaryCounts struct {
- // sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- sumBits uint64
- count uint64
-}
-
-type noObjectivesSummary struct {
- // countAndHotIdx enables lock-free writes with use of atomic updates.
- // The most significant bit is the hot index [0 or 1] of the count field
- // below. Observe calls update the hot one. All remaining bits count the
- // number of Observe calls. Observe starts by incrementing this counter,
-	// and finishes by incrementing the count field in the respective
- // summaryCounts, as a marker for completion.
- //
-	// Calls of the Write method (which are non-mutating reads from the
-	// perspective of the summary) swap the hot and cold counts under the
-	// writeMtx lock. A cooldown is awaited (while locked) by comparing the
-	// number of observations with the initiation count. Once they match, the
-	// last observation on the now-cold counts has completed. All cold fields
-	// must be merged into the new hot counts before releasing writeMtx.
-
- // Fields with atomic access first! See alignment constraint:
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- countAndHotIdx uint64
-
- selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
-
- // Two counts, one is "hot" for lock-free observations, the other is
- // "cold" for writing out a dto.Metric. It has to be an array of
-	// pointers to guarantee 64bit alignment of the summaryCounts, see
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- counts [2]*summaryCounts
-
- labelPairs []*dto.LabelPair
-
- createdTs *timestamppb.Timestamp
-}
-
-func (s *noObjectivesSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *noObjectivesSummary) Observe(v float64) {
-	// We increment s.countAndHotIdx so that the counter in the lower
- // 63 bits gets incremented. At the same time, we get the new value
- // back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&s.countAndHotIdx, 1)
- hotCounts := s.counts[n>>63]
-
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
- }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hotCounts.count, 1)
-}
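
The compare-and-swap loop above is the standard pattern for atomically adding to a float64, which sync/atomic does not support directly. The same pattern in isolation, as a sketch (not part of the library API), using the math and sync/atomic packages already imported by this file:

	// addFloat64 atomically adds v to the float64 encoded in *bits.
	func addFloat64(bits *uint64, v float64) {
		for {
			oldBits := atomic.LoadUint64(bits)
			newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
			if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
				return
			}
		}
	}
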
-
-func (s *noObjectivesSummary) Write(out *dto.Metric) error {
- // For simplicity, we protect this whole method by a mutex. It is not in
- // the hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it, if possible at
- // all.
- s.writeMtx.Lock()
- defer s.writeMtx.Unlock()
-
- // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
- // without touching the count bits. See the struct comments for a full
- // description of the algorithm.
- n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
- // count is contained unchanged in the lower 63 bits.
- count := n & ((1 << 63) - 1)
- // The most significant bit tells us which counts is hot. The complement
- // is thus the cold one.
- hotCounts := s.counts[n>>63]
- coldCounts := s.counts[(^n)>>63]
-
- // Await cooldown.
- for count != atomic.LoadUint64(&coldCounts.count) {
- runtime.Gosched() // Let observations get work done.
- }
-
- sum := &dto.Summary{
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
- CreatedTimestamp: s.createdTs,
- }
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
- }
- }
- return nil
-}
-
-type quantSort []*dto.Quantile
-
-func (s quantSort) Len() int {
- return len(s)
-}
-
-func (s quantSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s quantSort) Less(i, j int) bool {
- return s[i].GetQuantile() < s[j].GetQuantile()
-}
-
-// SummaryVec is a Collector that bundles a set of Summaries that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewSummaryVec.
-type SummaryVec struct {
- *MetricVec
-}
-
-// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names.
-//
-// Due to the way a Summary is represented in the Prometheus text format and how
-// it is handled by the Prometheus server internally, “quantile” is an illegal
-// label name. NewSummaryVec will panic if this label name is used.
-func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- return V2.NewSummaryVec(SummaryVecOpts{
- SummaryOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
-
-// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
-func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
- for _, ln := range opts.VariableLabels.labelNames() {
- if ln == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- return &SummaryVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts.SummaryOpts, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Summary for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Summary is created.
-//
-// It is possible to call this method without using the returned Summary to only
-// create the new Summary but leave it at its starting value, a Summary without
-// any observations.
-//
-// Keeping the Summary for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Summary from the SummaryVec. In that case,
-// the Summary will still exist, but it will not be exported anymore, even if a
-// Summary with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Summary for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Summary is created. Implications of
-// creating a Summary without using it and keeping the Summary for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
- s, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *SummaryVec) With(labels Labels) Observer {
- s, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return s
-}
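
Putting WithLabelValues and With together, a typical SummaryVec partitioned by status code and method (the metric name and objectives here are illustrative):

	requestDurations := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       "http_request_duration_seconds",
		Help:       "HTTP request latencies.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	}, []string{"code", "method"})

	requestDurations.WithLabelValues("404", "GET").Observe(0.42)
	requestDurations.With(prometheus.Labels{"code": "200", "method": "POST"}).Observe(0.21)
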
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the SummaryVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &SummaryVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
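
Currying in practice, continuing the requestDurations sketch above: pre-set one label once so call sites only supply the rest.

	// Only "code" remains variable on the curried vector.
	getOnly := requestDurations.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("200").Observe(0.01)
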
-
-type constSummary struct {
- desc *Desc
- count uint64
- sum float64
- quantiles map[float64]float64
- labelPairs []*dto.LabelPair
- createdTs *timestamppb.Timestamp
-}
-
-func (s *constSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{
- CreatedTimestamp: s.createdTs,
- }
- qs := make([]*dto.Quantile, 0, len(s.quantiles))
-
- sum.SampleCount = proto.Uint64(s.count)
- sum.SampleSum = proto.Float64(s.sum)
-
- for rank, q := range s.quantiles {
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- return nil
-}
-
-// NewConstSummary returns a metric representing a Prometheus summary with fixed
-// values for the count, sum, and quantiles. As those parameters cannot be
-// changed, the returned value does not implement the Summary interface (but
-// only the Metric interface). Users of this package will not have much use for
-// it in regular operations. However, when implementing custom Collectors, it is
-// useful as a throw-away metric that is generated on the fly to send it to
-// Prometheus in the Collect method.
-//
-// quantiles maps ranks to quantile values. For example, a median latency of
-// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-//
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
-//
-// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- return &constSummary{
- desc: desc,
- count: count,
- sum: sum,
- quantiles: quantiles,
- labelPairs: MakeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstSummary is a version of NewConstSummary that panics where
-// NewConstSummary would have returned an error.
-func MustNewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) Metric {
- m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
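
A short sketch of NewConstSummary in use; the count, sum, and quantile values are made-up numbers, and in a custom Collector the resulting Metric would be sent on the Collect channel:

	desc := prometheus.NewDesc(
		"external_rpc_duration_seconds",
		"RPC latency reported by an external system.",
		nil, nil,
	)
	m := prometheus.MustNewConstSummary(
		desc,
		4711,   // count
		403.34, // sum
		map[float64]float64{0.5: 0.23, 0.99: 0.56},
	)
	_ = m // Typically: ch <- m inside Collect.
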
-
-// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
-func NewConstSummaryWithCreatedTimestamp(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- ct time.Time,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- return &constSummary{
- desc: desc,
- count: count,
- sum: sum,
- quantiles: quantiles,
- labelPairs: MakeLabelPairs(desc, labelValues),
- createdTs: timestamppb.New(ct),
- }, nil
-}
-
-// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
-// NewConstSummaryWithCreatedTimestamp would have returned an error.
-func MustNewConstSummaryWithCreatedTimestamp(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- ct time.Time,
- labelValues ...string,
-) Metric {
- m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
deleted file mode 100644
index 52344fef5..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "time"
-
-// Timer is a helper type to time functions. Use NewTimer to create new
-// instances.
-type Timer struct {
- begin time.Time
- observer Observer
-}
-
-// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. If the Observer implements ExemplarObserver, passing an
-// exemplar later on is also supported. A Timer is usually used to time a
-// function call in the following way:
-//
-// func TimeMe() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDuration()
-// // Do actual work.
-// }
-//
-// or
-//
-// func TimeMeWithExemplar() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDurationWithExemplar(exemplar)
-// // Do actual work.
-// }
-func NewTimer(o Observer) *Timer {
- return &Timer{
- begin: time.Now(),
- observer: o,
- }
-}
-
-// ObserveDuration records the duration passed since the Timer was created with
-// NewTimer. It calls the Observe method of the Observer provided during
-// construction with the duration in seconds as an argument. The observed
-// duration is also returned. ObserveDuration is usually called with a defer
-// statement.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go 1.9+.
-func (t *Timer) ObserveDuration() time.Duration {
- d := time.Since(t.begin)
- if t.observer != nil {
- t.observer.Observe(d.Seconds())
- }
- return d
-}
-
-// ObserveDurationWithExemplar is like ObserveDuration, but it also passes the
-// provided exemplar along with the duration, unless the exemplar is nil or the
-// provided Observer cannot be cast to an ExemplarObserver.
-func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
- d := time.Since(t.begin)
- eo, ok := t.observer.(ExemplarObserver)
- if ok && exemplar != nil {
- eo.ObserveWithExemplar(d.Seconds(), exemplar)
- return d
- }
- if t.observer != nil {
- t.observer.Observe(d.Seconds())
- }
- return d
-}
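
A usage sketch for the exemplar path: histograms implement ExemplarObserver, so the exemplar is attached to the observed bucket, while a plain Summary would fall back to a normal Observe. The metric and label names are illustrative.

	var latency = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request latency.",
		Buckets: prometheus.DefBuckets,
	})

	func handle(traceID string) {
		timer := prometheus.NewTimer(latency)
		defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})
		// Do actual work.
	}
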
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
deleted file mode 100644
index 0f9ce63f4..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// UntypedOpts is an alias for Opts. See there for doc comments.
-type UntypedOpts Opts
-
-// UntypedFunc works like GaugeFunc but the collected metric is of type
-// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
-// type.
-//
-// To create UntypedFunc instances, use NewUntypedFunc.
-type UntypedFunc interface {
- Metric
- Collector
-}
-
-// NewUntypedFunc creates a new UntypedFunc based on the provided
-// UntypedOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where an UntypedFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, function)
-}
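
For example, mirroring an externally managed value; readQueueDepth is a hypothetical accessor that must be concurrency-safe, as the doc comment warns:

	// readQueueDepth stands in for a concurrency-safe read of an external value.
	func readQueueDepth() float64 { return 42 }

	queueDepth := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
		Name: "external_queue_depth",
		Help: "Depth of a queue managed by an external system.",
	}, readQueueDepth)
	prometheus.MustRegister(queueDepth)
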
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
deleted file mode 100644
index cc23011fa..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "sort"
- "time"
- "unicode/utf8"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// ValueType is an enumeration of metric types that represent a simple value.
-type ValueType int
-
-// Possible values for the ValueType enum. Use UntypedValue to mark a metric
-// with an unknown type.
-const (
- _ ValueType = iota
- CounterValue
- GaugeValue
- UntypedValue
-)
-
-var (
- CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
- GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
- UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
-)
-
-func (v ValueType) ToDTO() *dto.MetricType {
- switch v {
- case CounterValue:
- return CounterMetricTypePtr
- case GaugeValue:
- return GaugeMetricTypePtr
- default:
- return UntypedMetricTypePtr
- }
-}
-
-// valueFunc is a generic metric for simple values retrieved on collect time
-// from a function. It implements Metric and Collector. Its effective type is
-// determined by ValueType. This is a low-level building block used by the
-// library to back the implementations of CounterFunc, GaugeFunc, and
-// UntypedFunc.
-type valueFunc struct {
- selfCollector
-
- desc *Desc
- valType ValueType
- function func() float64
- labelPairs []*dto.LabelPair
-}
-
-// newValueFunc returns a newly allocated valueFunc with the given Desc and
-// ValueType. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a valueFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
- result := &valueFunc{
- desc: desc,
- valType: valueType,
- function: function,
- labelPairs: MakeLabelPairs(desc, nil),
- }
- result.init(result)
- return result
-}
-
-func (v *valueFunc) Desc() *Desc {
- return v.desc
-}
-
-func (v *valueFunc) Write(out *dto.Metric) error {
- return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
-}
-
-// NewConstMetric returns a metric with one fixed value that cannot be
-// changed. Users of this package will not have much use for it in regular
-// operations. However, when implementing custom Collectors, it is useful as a
-// throw-away metric that is generated on the fly to send it to Prometheus in
-// the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc or if Desc is
-// invalid.
-func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
-
- metric := &dto.Metric{}
- if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
- return nil, err
- }
-
- return &constMetric{
- desc: desc,
- metric: metric,
- }, nil
-}
-
-// MustNewConstMetric is a version of NewConstMetric that panics where
-// NewConstMetric would have returned an error.
-func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
- m, err := NewConstMetric(desc, valueType, value, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
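
A minimal custom Collector built on MustNewConstMetric, as the doc comment above suggests; the collector, metric, and label names are illustrative:

	type diskCollector struct {
		usedBytes *prometheus.Desc
	}

	func newDiskCollector() *diskCollector {
		return &diskCollector{
			usedBytes: prometheus.NewDesc(
				"disk_used_bytes", "Used bytes per mount point.",
				[]string{"mount"}, nil,
			),
		}
	}

	func (c *diskCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.usedBytes }

	func (c *diskCollector) Collect(ch chan<- prometheus.Metric) {
		// In reality the value would be read from the system here.
		ch <- prometheus.MustNewConstMetric(c.usedBytes, prometheus.GaugeValue, 123456, "/data")
	}
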
-
-// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
-// with created timestamp set and returns an error for other metric types.
-func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- switch valueType {
- case CounterValue:
- break
- default:
- return nil, errors.New("created timestamps are only supported for counters")
- }
-
- metric := &dto.Metric{}
- if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
- return nil, err
- }
-
- return &constMetric{
- desc: desc,
- metric: metric,
- }, nil
-}
-
-// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
-// NewConstMetricWithCreatedTimestamp would have returned an error.
-func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
- m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-type constMetric struct {
- desc *Desc
- metric *dto.Metric
-}
-
-func (m *constMetric) Desc() *Desc {
- return m.desc
-}
-
-func (m *constMetric) Write(out *dto.Metric) error {
- out.Label = m.metric.Label
- out.Counter = m.metric.Counter
- out.Gauge = m.metric.Gauge
- out.Untyped = m.metric.Untyped
- return nil
-}
-
-func populateMetric(
- t ValueType,
- v float64,
- labelPairs []*dto.LabelPair,
- e *dto.Exemplar,
- m *dto.Metric,
- ct *timestamppb.Timestamp,
-) error {
- m.Label = labelPairs
- switch t {
- case CounterValue:
- m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
- case GaugeValue:
- m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
- case UntypedValue:
- m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
- default:
- return fmt.Errorf("encountered unknown type %v", t)
- }
- return nil
-}
-
-// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
-// variable and constant labels in the provided Desc. The values for the
-// variable labels are defined by the labelValues slice, which must be in the
-// same order as the corresponding variable labels in the Desc.
-//
-// This function is only needed for custom Metric implementations. See MetricVec
-// example.
-func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
- totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
- if totalLen == 0 {
- // Super fast path.
- return nil
- }
- if len(desc.variableLabels.names) == 0 {
- // Moderately fast path.
- return desc.constLabelPairs
- }
- labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, l := range desc.variableLabels.names {
- labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(l),
- Value: proto.String(labelValues[i]),
- })
- }
- labelPairs = append(labelPairs, desc.constLabelPairs...)
- sort.Sort(internal.LabelPairSorter(labelPairs))
- return labelPairs
-}
-
-// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 128
-
-// newExemplar creates a new dto.Exemplar from the provided values. An error is
-// returned if any of the label names or values are invalid or if the total
-// number of runes in the label names and values exceeds ExemplarMaxRunes.
-func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
- e := &dto.Exemplar{}
- e.Value = proto.Float64(value)
- tsProto := timestamppb.New(ts)
- if err := tsProto.CheckValid(); err != nil {
- return nil, err
- }
- e.Timestamp = tsProto
- labelPairs := make([]*dto.LabelPair, 0, len(l))
- var runes int
- for name, value := range l {
- if !checkLabelName(name) {
- return nil, fmt.Errorf("exemplar label name %q is invalid", name)
- }
- runes += utf8.RuneCountInString(name)
- if !utf8.ValidString(value) {
- return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
- }
- runes += utf8.RuneCountInString(value)
- labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(name),
- Value: proto.String(value),
- })
- }
- if runes > ExemplarMaxRunes {
- return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
- }
- e.Label = labelPairs
- return e, nil
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
deleted file mode 100644
index 2c808eece..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ /dev/null
@@ -1,709 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sync"
-
- "github.com/prometheus/common/model"
-)
-
-// MetricVec is a Collector to bundle metrics of the same name that differ in
-// their label values. MetricVec is not used directly but as a building block
-// for implementations of vectors of a given metric type, like GaugeVec,
-// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
-// used for custom Metric implementations.
-//
-// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
-// FooVec and initialize it with NewMetricVec. Implement wrappers for
-// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
-// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
-// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
-// add the convenience methods WithLabelValues, With, and MustCurryWith, which
-// panic instead of returning errors. See also the MetricVec example.
-type MetricVec struct {
- *metricMap
-
- curry []curriedLabelValue
-
- // hashAdd and hashAddByte can be replaced for testing collision handling.
- hashAdd func(h uint64, s string) uint64
- hashAddByte func(h uint64, b byte) uint64
-}
-
-// NewMetricVec returns an initialized MetricVec.
-func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
- return &MetricVec{
- metricMap: &metricMap{
- metrics: map[uint64][]metricWithLabelValues{},
- desc: desc,
- newMetric: newMetric,
- },
- hashAdd: hashAdd,
- hashAddByte: hashAddByte,
- }
-}
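
Following the FooVec recipe from the type comment above, a compilable toy vector. The newMetric callback here returns a const gauge purely to keep the sketch self-contained; a real FooVec would construct its custom Metric from desc and lvs:

	type FooVec struct {
		*prometheus.MetricVec
	}

	func NewFooVec(labelNames []string) *FooVec {
		desc := prometheus.NewDesc("foo_demo", "Demo metric.", labelNames, nil)
		return &FooVec{
			MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
				// A real FooVec would build its custom Metric here.
				return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, lvs...)
			}),
		}
	}
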
-
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
- lvs = constrainLabelValues(m.desc, lvs, m.curry)
-
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return false
- }
-
- return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
-}
-
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc. However, such inconsistent Labels
-// can never match an actual metric, so the method will always return false in
-// that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
- labels, closer := constrainLabels(m.desc, labels)
- defer closer()
-
- h, err := m.hashLabels(labels)
- if err != nil {
- return false
- }
-
- return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
-}
-
-// DeletePartialMatch deletes all metrics where the variable labels contain all of those
-// passed in as labels. The order of the labels does not matter.
-// It returns the number of metrics deleted.
-//
-// Note that curried labels will never be matched if deleting from the curried vector.
-// To match curried labels with DeletePartialMatch, it must be called on the base vector.
-func (m *MetricVec) DeletePartialMatch(labels Labels) int {
- labels, closer := constrainLabels(m.desc, labels)
- defer closer()
-
- return m.metricMap.deleteByLabels(labels, m.curry)
-}
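
For instance, with a SummaryVec like the requestDurations example earlier, DeletePartialMatch removes every series carrying the given label pair, regardless of the other labels:

	// Deletes all series with method="GET", whatever their "code" value.
	deleted := requestDurations.DeletePartialMatch(prometheus.Labels{"method": "GET"})
	fmt.Println("series deleted:", deleted)
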
-
-// Without explicit forwarding of Describe, Collect, Reset, those methods won't
-// show up in GoDoc.
-
-// Describe implements Collector.
-func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
-
-// Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
-
-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() { m.metricMap.Reset() }
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the MetricVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-//
-// Note that CurryWith is usually not called directly but through a wrapper
-// around MetricVec, implementing a vector for a specific Metric
-// implementation, for example GaugeVec.
-func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
- var (
- newCurry []curriedLabelValue
- oldCurry = m.curry
- iCurry int
- )
- for i, labelName := range m.desc.variableLabels.names {
- val, ok := labels[labelName]
- if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
- if ok {
- return nil, fmt.Errorf("label name %q is already curried", labelName)
- }
- newCurry = append(newCurry, oldCurry[iCurry])
- iCurry++
- } else {
- if !ok {
- continue // Label stays uncurried.
- }
- newCurry = append(newCurry, curriedLabelValue{
- i,
- m.desc.variableLabels.constrain(labelName, val),
- })
- }
- }
- if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
- return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
- }
-
- return &MetricVec{
- metricMap: m.metricMap,
- curry: newCurry,
- hashAdd: m.hashAdd,
- hashAddByte: m.hashAddByte,
- }, nil
-}
-
-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created (by
-// calling the newMetric function provided during construction of the
-// MetricVec).
-//
-// It is possible to call this method without using the returned Metric to only
-// create the new Metric but leave it in its initial state.
-//
-// Keeping the Metric for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Metric from the MetricVec. In that case, the
-// Metric will still exist, but it will not be exported anymore, even if a
-// Metric with the same label values is created later.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-//
-// Note that GetMetricWithLabelValues is usually not called directly but through
-// a wrapper around MetricVec, implementing a vector for a specific Metric
-// implementation, for example GaugeVec.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
- lvs = constrainLabelValues(m.desc, lvs, m.curry)
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return nil, err
- }
-
- return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
-}
-
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Metric is created. Implications of
-// creating a Metric without using it and keeping the Metric for later use
-// are the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-//
-// Note that GetMetricWith is usually not called directly but through a wrapper
-// around MetricVec, implementing a vector for a specific Metric implementation,
-// for example GaugeVec.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
- labels, closer := constrainLabels(m.desc, labels)
- defer closer()
-
- h, err := m.hashLabels(labels)
- if err != nil {
- return nil, err
- }
-
- return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
-}
-
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
- return 0, err
- }
-
- var (
- h = hashNew()
- curry = m.curry
- iVals, iCurry int
- )
- for i := 0; i < len(m.desc.variableLabels.names); i++ {
- if iCurry < len(curry) && curry[iCurry].index == i {
- h = m.hashAdd(h, curry[iCurry].value)
- iCurry++
- } else {
- h = m.hashAdd(h, vals[iVals])
- iVals++
- }
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
- return 0, err
- }
-
- var (
- h = hashNew()
- curry = m.curry
- iCurry int
- )
- for i, labelName := range m.desc.variableLabels.names {
- val, ok := labels[labelName]
- if iCurry < len(curry) && curry[iCurry].index == i {
- if ok {
- return 0, fmt.Errorf("label name %q is already curried", labelName)
- }
- h = m.hashAdd(h, curry[iCurry].value)
- iCurry++
- } else {
- if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", labelName)
- }
- h = m.hashAdd(h, val)
- }
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
-// metricWithLabelValues provides the metric and its label values for
-// disambiguation on hash collision.
-type metricWithLabelValues struct {
- values []string
- metric Metric
-}
-
-// curriedLabelValue sets the curried value for a label at the given index.
-type curriedLabelValue struct {
- index int
- value string
-}
-
-// metricMap is a helper for metricVec and shared between differently curried
-// metricVecs.
-type metricMap struct {
- mtx sync.RWMutex // Protects metrics.
- metrics map[uint64][]metricWithLabelValues
- desc *Desc
- newMetric func(labelValues ...string) Metric
-}
-
-// Describe implements Collector. It will send exactly one Desc to the provided
-// channel.
-func (m *metricMap) Describe(ch chan<- *Desc) {
- ch <- m.desc
-}
-
-// Collect implements Collector.
-func (m *metricMap) Collect(ch chan<- Metric) {
- m.mtx.RLock()
- defer m.mtx.RUnlock()
-
- for _, metrics := range m.metrics {
- for _, metric := range metrics {
- ch <- metric.metric
- }
- }
-}
-
-// Reset deletes all metrics in this vector.
-func (m *metricMap) Reset() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for h := range m.metrics {
- delete(m.metrics, h)
- }
-}
-
-// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
-// there are multiple matches in the bucket, use lvs to select a metric and
-// remove only that metric.
-func (m *metricMap) deleteByHashWithLabelValues(
- h uint64, lvs []string, curry []curriedLabelValue,
-) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- metrics, ok := m.metrics[h]
- if !ok {
- return false
- }
-
- i := findMetricWithLabelValues(metrics, lvs, curry)
- if i >= len(metrics) {
- return false
- }
-
- if len(metrics) > 1 {
- old := metrics
- m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
- old[len(old)-1] = metricWithLabelValues{}
- } else {
- delete(m.metrics, h)
- }
- return true
-}
-
-// deleteByHashWithLabels removes the metric from the hash bucket h. If there
-// are multiple matches in the bucket, use labels to select a metric and remove
-// only that metric.
-func (m *metricMap) deleteByHashWithLabels(
- h uint64, labels Labels, curry []curriedLabelValue,
-) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- metrics, ok := m.metrics[h]
- if !ok {
- return false
- }
- i := findMetricWithLabels(m.desc, metrics, labels, curry)
- if i >= len(metrics) {
- return false
- }
-
- if len(metrics) > 1 {
- old := metrics
- m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
- old[len(old)-1] = metricWithLabelValues{}
- } else {
- delete(m.metrics, h)
- }
- return true
-}
-
-// deleteByLabels deletes all metrics whose variable labels contain all of the
-// given labels, and returns the number of metrics deleted.
-func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- var numDeleted int
-
- for h, metrics := range m.metrics {
- i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
- if i >= len(metrics) {
- // Didn't find matching labels in this metric slice.
- continue
- }
- delete(m.metrics, h)
- numDeleted++
- }
-
- return numDeleted
-}
-
-// findMetricWithPartialLabels returns the index of the first matching metric or
-// len(metrics) if not found.
-func findMetricWithPartialLabels(
- desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchPartialLabels(desc, metric.values, labels, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-// indexOf searches the given slice of strings for the target string and returns
-// its index, or len(items) if not found, along with a boolean indicating whether
-// the search succeeded.
-func indexOf(target string, items []string) (int, bool) {
- for i, l := range items {
- if l == target {
- return i, true
- }
- }
- return len(items), false
-}
-
-// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
-// and returns whether it matches either the "base" value or the curried value accordingly.
-// It also indicates whether the match is against a curried or uncurried value.
-func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
- for _, curriedValue := range curry {
- if curriedValue.index == index {
- // This label was curried. See if the curried value matches our target.
- return curriedValue.value == targetValue, true
- }
- }
- // This label was not curried. See if the current value matches our target label.
- return values[index] == targetValue, false
-}
-
-// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
-func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
- for l, v := range labels {
- // Check if the target label exists in our metrics and get the index.
- varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
- if validLabel {
- // Check the value of that label against the target value.
- // We don't consider curried values in partial matches.
- matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
- if matches && !curried {
- continue
- }
- }
- return false
- }
- return true
-}
-
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
-// values, or creates it and returns the new one.
-//
-// This function acquires the mutex itself (a read lock first, then the
-// write lock if the metric has to be created).
-func (m *metricMap) getOrCreateMetricWithLabelValues(
- hash uint64, lvs []string, curry []curriedLabelValue,
-) Metric {
- m.mtx.RLock()
- metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
- m.mtx.RUnlock()
- if ok {
- return metric
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
- if !ok {
- inlinedLVs := inlineLabelValues(lvs, curry)
- metric = m.newMetric(inlinedLVs...)
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
- }
- return metric
-}
-
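
The function above is a textbook double-checked locking pattern: an optimistic lookup under the read lock, then the write lock with a second lookup so two goroutines racing on the same label values cannot create the metric twice. A minimal standalone sketch of the same idiom (the cache type is hypothetical, not part of this file):

package main

import "sync"

type cache struct {
	mtx sync.RWMutex
	m   map[string]int
}

func (c *cache) getOrCreate(key string, create func() int) int {
	// Fast path: optimistic read under the shared lock.
	c.mtx.RLock()
	v, ok := c.m[key]
	c.mtx.RUnlock()
	if ok {
		return v
	}
	// Slow path: take the exclusive lock and check again, because
	// another goroutine may have created the entry in between.
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if v, ok := c.m[key]; ok {
		return v
	}
	v = create()
	c.m[key] = v
	return v
}

func main() {
	c := &cache{m: make(map[string]int)}
	println(c.getOrCreate("a", func() int { return 42 }))
}
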
-// getOrCreateMetricWithLabels retrieves the metric by hash and labels, or
-// creates it and returns the new one.
-//
-// This function acquires the mutex itself (a read lock first, then the
-// write lock if the metric has to be created).
-func (m *metricMap) getOrCreateMetricWithLabels(
- hash uint64, labels Labels, curry []curriedLabelValue,
-) Metric {
- m.mtx.RLock()
- metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
- m.mtx.RUnlock()
- if ok {
- return metric
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
- if !ok {
- lvs := extractLabelValues(m.desc, labels, curry)
- metric = m.newMetric(lvs...)
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
- }
- return metric
-}
-
-// getMetricWithHashAndLabelValues gets a metric while handling possible
-// collisions in the hash space. Must be called while holding the read mutex.
-func (m *metricMap) getMetricWithHashAndLabelValues(
- h uint64, lvs []string, curry []curriedLabelValue,
-) (Metric, bool) {
- metrics, ok := m.metrics[h]
- if ok {
- if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
- return metrics[i].metric, true
- }
- }
- return nil, false
-}
-
-// getMetricWithHashAndLabels gets a metric while handling possible collisions
-// in the hash space. Must be called while holding the read mutex.
-func (m *metricMap) getMetricWithHashAndLabels(
- h uint64, labels Labels, curry []curriedLabelValue,
-) (Metric, bool) {
- metrics, ok := m.metrics[h]
- if ok {
- if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
- return metrics[i].metric, true
- }
- }
- return nil, false
-}
-
-// findMetricWithLabelValues returns the index of the matching metric or
-// len(metrics) if not found.
-func findMetricWithLabelValues(
- metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchLabelValues(metric.values, lvs, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-// findMetricWithLabels returns the index of the matching metric or len(metrics)
-// if not found.
-func findMetricWithLabels(
- desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchLabels(desc, metric.values, labels, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
- if len(values) != len(lvs)+len(curry) {
- return false
- }
- var iLVs, iCurry int
- for i, v := range values {
- if iCurry < len(curry) && curry[iCurry].index == i {
- if v != curry[iCurry].value {
- return false
- }
- iCurry++
- continue
- }
- if v != lvs[iLVs] {
- return false
- }
- iLVs++
- }
- return true
-}
-
-func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
- if len(values) != len(labels)+len(curry) {
- return false
- }
- iCurry := 0
- for i, k := range desc.variableLabels.names {
- if iCurry < len(curry) && curry[iCurry].index == i {
- if values[i] != curry[iCurry].value {
- return false
- }
- iCurry++
- continue
- }
- if values[i] != labels[k] {
- return false
- }
- }
- return true
-}
-
-func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
- labelValues := make([]string, len(labels)+len(curry))
- iCurry := 0
- for i, k := range desc.variableLabels.names {
- if iCurry < len(curry) && curry[iCurry].index == i {
- labelValues[i] = curry[iCurry].value
- iCurry++
- continue
- }
- labelValues[i] = labels[k]
- }
- return labelValues
-}
-
-func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
- labelValues := make([]string, len(lvs)+len(curry))
- var iCurry, iLVs int
- for i := range labelValues {
- if iCurry < len(curry) && curry[iCurry].index == i {
- labelValues[i] = curry[iCurry].value
- iCurry++
- continue
- }
- labelValues[i] = lvs[iLVs]
- iLVs++
- }
- return labelValues
-}
-
-var labelsPool = &sync.Pool{
- New: func() interface{} {
- return make(Labels)
- },
-}
-
-func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
- if len(desc.variableLabels.labelConstraints) == 0 {
-		// Fast path when there are no constraints
- return labels, func() {}
- }
-
- constrainedLabels := labelsPool.Get().(Labels)
- for l, v := range labels {
- constrainedLabels[l] = desc.variableLabels.constrain(l, v)
- }
-
- return constrainedLabels, func() {
- for k := range constrainedLabels {
- delete(constrainedLabels, k)
- }
- labelsPool.Put(constrainedLabels)
- }
-}
-
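
constrainLabels above couples a sync.Pool with a cleanup closure: the borrowed map is emptied before being put back, so labels from one call can never leak into the next. A minimal sketch of that reset-before-Put idiom (names are illustrative, not part of this file):

package main

import "sync"

var bufPool = &sync.Pool{
	New: func() interface{} { return make(map[string]string) },
}

// withScratchMap hands the caller a pooled map plus a done func that
// clears and returns it, mirroring constrainLabels' cleanup closure.
func withScratchMap() (map[string]string, func()) {
	m := bufPool.Get().(map[string]string)
	return m, func() {
		for k := range m {
			delete(m, k) // reset before Put so no entries leak
		}
		bufPool.Put(m)
	}
}

func main() {
	m, done := withScratchMap()
	defer done()
	m["job"] = "api"
}
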
-func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
- if len(desc.variableLabels.labelConstraints) == 0 {
-		// Fast path when there are no constraints
- return lvs
- }
-
- constrainedValues := make([]string, len(lvs))
- var iCurry, iLVs int
- for i := 0; i < len(lvs)+len(curry); i++ {
- if iCurry < len(curry) && curry[iCurry].index == i {
- iCurry++
- continue
- }
-
- if i < len(desc.variableLabels.names) {
- constrainedValues[iLVs] = desc.variableLabels.constrain(
- desc.variableLabels.names[i],
- lvs[iLVs],
- )
- } else {
- constrainedValues[iLVs] = lvs[iLVs]
- }
- iLVs++
- }
- return constrainedValues
-}
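
All of this matching machinery is driven by the public *Vec API: curried values are stored once and re-interleaved by inlineLabelValues on every lookup, while DeletePartialMatch walks the buckets via matchPartialLabels. A hedged usage sketch (the metric and label names are invented for illustration):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "requests_total", Help: "Requests."},
		[]string{"service", "method", "code"},
	)

	// Currying fixes "service"; the curried value is re-inserted at
	// its index by inlineLabelValues on every lookup.
	apiReqs := reqs.MustCurryWith(prometheus.Labels{"service": "api"})

	// Callers now supply only the remaining labels, in declaration order.
	apiReqs.WithLabelValues("GET", "200").Inc()

	// DeletePartialMatch removes every series whose labels include the
	// given pairs, via matchPartialLabels.
	reqs.DeletePartialMatch(prometheus.Labels{"code": "200"})
}
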
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
deleted file mode 100644
index 42bc3a8f0..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-type v2 struct{}
-
-// V2 is a struct that can be referenced to access the experimental API that
-// might be present in v2 of client_golang someday. It offers extended
-// functionality over v1 with a slightly changed API. It is acceptable to use
-// some pieces from v1, e.g. `prometheus.NewGauge`, and some from v2, e.g.
-// `prometheus.V2.NewDesc`, in the same codebase.
-var V2 = v2{}
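
A short sketch of mixing the two entry points, as the comment permits; prometheus.UnconstrainedLabels adapts a plain label list to V2.NewDesc, and the exact V2 surface should be treated as experimental:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// v1 constructor.
	g := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "queue_depth", Help: "Current queue depth.",
	})
	g.Set(3)

	// V2 constructor for a Desc in the same codebase.
	desc := prometheus.V2.NewDesc(
		"jobs_total", "Jobs seen.",
		prometheus.UnconstrainedLabels{"kind"},
		nil,
	)
	_ = desc
}
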
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
deleted file mode 100644
index 25da157f1..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sort"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
-)
-
-// WrapRegistererWith returns a Registerer wrapping the provided
-// Registerer. Collectors registered with the returned Registerer will be
-// registered with the wrapped Registerer in a modified way. The modified
-// Collector adds the provided Labels to all Metrics it collects (as
-// ConstLabels). The Metrics collected by the unmodified Collector must not
-// duplicate any of those labels. Wrapping a nil value is valid, resulting
-// in a no-op Registerer.
-//
-// WrapRegistererWith provides a way to add fixed labels to a subset of
-// Collectors. It should not be used to add fixed labels to all metrics
-// exposed. See also
-// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
-//
-// Conflicts between Collectors registered through the original Registerer and
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
-//
-// The Collector example demonstrates a use of WrapRegistererWith.
-func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
- return &wrappingRegisterer{
- wrappedRegisterer: reg,
- labels: labels,
- }
-}
-
-// WrapRegistererWithPrefix returns a Registerer wrapping the provided
-// Registerer. Collectors registered with the returned Registerer will be
-// registered with the wrapped Registerer in a modified way. The modified
-// Collector adds the provided prefix to the name of all Metrics it collects.
-// Wrapping a nil value is valid, resulting in a no-op Registerer.
-//
-// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
-// a sub-system. To make this work, register metrics of the sub-system with the
-// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
-// to use the same prefix for all metrics exposed. In particular, do not prefix
-// metric names that are standardized across applications, as that would break
-// horizontal monitoring, for example the metrics provided by the Go collector
-// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
-// respectively.)
-//
-// Conflicts between Collectors registered through the original Registerer and
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
- return &wrappingRegisterer{
- wrappedRegisterer: reg,
- prefix: prefix,
- }
-}
-
-type wrappingRegisterer struct {
- wrappedRegisterer Registerer
- prefix string
- labels Labels
-}
-
-func (r *wrappingRegisterer) Register(c Collector) error {
- if r.wrappedRegisterer == nil {
- return nil
- }
- return r.wrappedRegisterer.Register(&wrappingCollector{
- wrappedCollector: c,
- prefix: r.prefix,
- labels: r.labels,
- })
-}
-
-func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
- if r.wrappedRegisterer == nil {
- return
- }
- for _, c := range cs {
- if err := r.Register(c); err != nil {
- panic(err)
- }
- }
-}
-
-func (r *wrappingRegisterer) Unregister(c Collector) bool {
- if r.wrappedRegisterer == nil {
- return false
- }
- return r.wrappedRegisterer.Unregister(&wrappingCollector{
- wrappedCollector: c,
- prefix: r.prefix,
- labels: r.labels,
- })
-}
-
-type wrappingCollector struct {
- wrappedCollector Collector
- prefix string
- labels Labels
-}
-
-func (c *wrappingCollector) Collect(ch chan<- Metric) {
- wrappedCh := make(chan Metric)
- go func() {
- c.wrappedCollector.Collect(wrappedCh)
- close(wrappedCh)
- }()
- for m := range wrappedCh {
- ch <- &wrappingMetric{
- wrappedMetric: m,
- prefix: c.prefix,
- labels: c.labels,
- }
- }
-}
-
-func (c *wrappingCollector) Describe(ch chan<- *Desc) {
- wrappedCh := make(chan *Desc)
- go func() {
- c.wrappedCollector.Describe(wrappedCh)
- close(wrappedCh)
- }()
- for desc := range wrappedCh {
- ch <- wrapDesc(desc, c.prefix, c.labels)
- }
-}
-
-func (c *wrappingCollector) unwrapRecursively() Collector {
- switch wc := c.wrappedCollector.(type) {
- case *wrappingCollector:
- return wc.unwrapRecursively()
- default:
- return wc
- }
-}
-
-type wrappingMetric struct {
- wrappedMetric Metric
- prefix string
- labels Labels
-}
-
-func (m *wrappingMetric) Desc() *Desc {
- return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
-}
-
-func (m *wrappingMetric) Write(out *dto.Metric) error {
- if err := m.wrappedMetric.Write(out); err != nil {
- return err
- }
- if len(m.labels) == 0 {
- // No wrapping labels.
- return nil
- }
- for ln, lv := range m.labels {
- out.Label = append(out.Label, &dto.LabelPair{
- Name: proto.String(ln),
- Value: proto.String(lv),
- })
- }
- sort.Sort(internal.LabelPairSorter(out.Label))
- return nil
-}
-
-func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
- constLabels := Labels{}
- for _, lp := range desc.constLabelPairs {
- constLabels[*lp.Name] = *lp.Value
- }
- for ln, lv := range labels {
- if _, alreadyUsed := constLabels[ln]; alreadyUsed {
- return &Desc{
- fqName: desc.fqName,
- help: desc.help,
- variableLabels: desc.variableLabels,
- constLabelPairs: desc.constLabelPairs,
- err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
- }
- }
- constLabels[ln] = lv
- }
- // NewDesc will do remaining validations.
- newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
-	// Propagate errors if there were any. This will override any error
-	// created by NewDesc above, i.e. earlier errors take precedence.
- if desc.err != nil {
- newDesc.err = desc.err
- }
- return newDesc
-}
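
A brief usage sketch combining both wrappers defined above; the metric, prefix, and label names are illustrative only:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Prefix all metrics of a sub-system...
	sub := prometheus.WrapRegistererWithPrefix("worker_", reg)

	// ...and stamp a fixed label onto a subset of collectors.
	labeled := prometheus.WrapRegistererWith(prometheus.Labels{"shard": "0"}, sub)

	jobs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_total", Help: "Jobs processed.",
	})
	// Registered as worker_jobs_total{shard="0"}; conflicts with
	// collectors registered directly on reg are still detected.
	labeled.MustRegister(jobs)
}
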
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
deleted file mode 100644
index 20110e410..000000000
--- a/vendor/github.com/prometheus/client_model/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Data model artifacts for Prometheus.
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
deleted file mode 100644
index 2f1549075..000000000
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ /dev/null
@@ -1,1399 +0,0 @@
-// Copyright 2013 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.20.3
-// source: io/prometheus/client/metrics.proto
-
-package io_prometheus_client
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type MetricType int32
-
-const (
- // COUNTER must use the Metric field "counter".
- MetricType_COUNTER MetricType = 0
- // GAUGE must use the Metric field "gauge".
- MetricType_GAUGE MetricType = 1
- // SUMMARY must use the Metric field "summary".
- MetricType_SUMMARY MetricType = 2
- // UNTYPED must use the Metric field "untyped".
- MetricType_UNTYPED MetricType = 3
- // HISTOGRAM must use the Metric field "histogram".
- MetricType_HISTOGRAM MetricType = 4
- // GAUGE_HISTOGRAM must use the Metric field "histogram".
- MetricType_GAUGE_HISTOGRAM MetricType = 5
-)
-
-// Enum value maps for MetricType.
-var (
- MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
- 5: "GAUGE_HISTOGRAM",
- }
- MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
- "GAUGE_HISTOGRAM": 5,
- }
-)
-
-func (x MetricType) Enum() *MetricType {
- p := new(MetricType)
- *p = x
- return p
-}
-
-func (x MetricType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (MetricType) Descriptor() protoreflect.EnumDescriptor {
- return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
-}
-
-func (MetricType) Type() protoreflect.EnumType {
- return &file_io_prometheus_client_metrics_proto_enumTypes[0]
-}
-
-func (x MetricType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *MetricType) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = MetricType(num)
- return nil
-}
-
-// Deprecated: Use MetricType.Descriptor instead.
-func (MetricType) EnumDescriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
-}
-
-type LabelPair struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *LabelPair) Reset() {
- *x = LabelPair{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *LabelPair) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*LabelPair) ProtoMessage() {}
-
-func (x *LabelPair) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *LabelPair) GetName() string {
- if x != nil && x.Name != nil {
- return *x.Name
- }
- return ""
-}
-
-func (x *LabelPair) GetValue() string {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return ""
-}
-
-type Gauge struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *Gauge) Reset() {
- *x = Gauge{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Gauge) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Gauge) ProtoMessage() {}
-
-func (x *Gauge) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
-func (*Gauge) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Gauge) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-type Counter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
-}
-
-func (x *Counter) Reset() {
- *x = Counter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Counter) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Counter) ProtoMessage() {}
-
-func (x *Counter) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
-func (*Counter) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Counter) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-func (x *Counter) GetExemplar() *Exemplar {
- if x != nil {
- return x.Exemplar
- }
- return nil
-}
-
-func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CreatedTimestamp
- }
- return nil
-}
-
-type Quantile struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *Quantile) Reset() {
- *x = Quantile{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Quantile) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Quantile) ProtoMessage() {}
-
-func (x *Quantile) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
-func (*Quantile) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Quantile) GetQuantile() float64 {
- if x != nil && x.Quantile != nil {
- return *x.Quantile
- }
- return 0
-}
-
-func (x *Quantile) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-type Summary struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
-}
-
-func (x *Summary) Reset() {
- *x = Summary{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Summary) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Summary) ProtoMessage() {}
-
-func (x *Summary) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
-func (*Summary) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *Summary) GetSampleCount() uint64 {
- if x != nil && x.SampleCount != nil {
- return *x.SampleCount
- }
- return 0
-}
-
-func (x *Summary) GetSampleSum() float64 {
- if x != nil && x.SampleSum != nil {
- return *x.SampleSum
- }
- return 0
-}
-
-func (x *Summary) GetQuantile() []*Quantile {
- if x != nil {
- return x.Quantile
- }
- return nil
-}
-
-func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CreatedTimestamp
- }
- return nil
-}
-
-type Untyped struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *Untyped) Reset() {
- *x = Untyped{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Untyped) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Untyped) ProtoMessage() {}
-
-func (x *Untyped) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
-func (*Untyped) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *Untyped) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-type Histogram struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- // Buckets for the conventional histogram.
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
- CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
- // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
- // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
- // then each power of two is divided into 2^n logarithmic buckets.
- // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
- // In the future, more bucket schemas may be added using numbers < -4 or > 8.
- Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
- ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
- ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
-	ZeroCountFloat   *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`  // Overrides zero_count if > 0.
- // Negative buckets for the native histogram.
- NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
- // Use either "negative_delta" or "negative_count", the former for
- // regular histograms with integer counts, the latter for float
- // histograms.
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
- NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
- // Positive buckets for the native histogram.
- // Use a no-op span (offset 0, length 0) for a native histogram without any
- // observations yet and with a zero_threshold of 0. Otherwise, it would be
- // indistinguishable from a classic histogram.
- PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
- // Use either "positive_delta" or "positive_count", the former for
- // regular histograms with integer counts, the latter for float
- // histograms.
- PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
- PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
- // Only used for native histograms. These exemplars MUST have a timestamp.
- Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
-}
-
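
To make the schema comment above concrete: consecutive bucket boundaries differ by the constant factor 2^(2^-schema), so schema 3 splits every power of two into 2^3 = 8 logarithmic buckets. A standalone sketch computing boundaries under that formula:

package main

import (
	"fmt"
	"math"
)

// boundary returns the i-th native-histogram bucket boundary for a
// given schema: boundaries grow by a constant factor of 2^(2^-schema).
func boundary(schema, i int) float64 {
	factor := math.Pow(2, math.Pow(2, float64(-schema)))
	return math.Pow(factor, float64(i))
}

func main() {
	// For schema 3 the growth factor is 2^(1/8) ≈ 1.0905, and the
	// 8th boundary lands exactly on the next power of two.
	for i := 0; i <= 8; i++ {
		fmt.Printf("schema=3 boundary %d: %.4f\n", i, boundary(3, i))
	}
}
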
-func (x *Histogram) Reset() {
- *x = Histogram{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Histogram) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Histogram) ProtoMessage() {}
-
-func (x *Histogram) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
-func (*Histogram) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *Histogram) GetSampleCount() uint64 {
- if x != nil && x.SampleCount != nil {
- return *x.SampleCount
- }
- return 0
-}
-
-func (x *Histogram) GetSampleCountFloat() float64 {
- if x != nil && x.SampleCountFloat != nil {
- return *x.SampleCountFloat
- }
- return 0
-}
-
-func (x *Histogram) GetSampleSum() float64 {
- if x != nil && x.SampleSum != nil {
- return *x.SampleSum
- }
- return 0
-}
-
-func (x *Histogram) GetBucket() []*Bucket {
- if x != nil {
- return x.Bucket
- }
- return nil
-}
-
-func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CreatedTimestamp
- }
- return nil
-}
-
-func (x *Histogram) GetSchema() int32 {
- if x != nil && x.Schema != nil {
- return *x.Schema
- }
- return 0
-}
-
-func (x *Histogram) GetZeroThreshold() float64 {
- if x != nil && x.ZeroThreshold != nil {
- return *x.ZeroThreshold
- }
- return 0
-}
-
-func (x *Histogram) GetZeroCount() uint64 {
- if x != nil && x.ZeroCount != nil {
- return *x.ZeroCount
- }
- return 0
-}
-
-func (x *Histogram) GetZeroCountFloat() float64 {
- if x != nil && x.ZeroCountFloat != nil {
- return *x.ZeroCountFloat
- }
- return 0
-}
-
-func (x *Histogram) GetNegativeSpan() []*BucketSpan {
- if x != nil {
- return x.NegativeSpan
- }
- return nil
-}
-
-func (x *Histogram) GetNegativeDelta() []int64 {
- if x != nil {
- return x.NegativeDelta
- }
- return nil
-}
-
-func (x *Histogram) GetNegativeCount() []float64 {
- if x != nil {
- return x.NegativeCount
- }
- return nil
-}
-
-func (x *Histogram) GetPositiveSpan() []*BucketSpan {
- if x != nil {
- return x.PositiveSpan
- }
- return nil
-}
-
-func (x *Histogram) GetPositiveDelta() []int64 {
- if x != nil {
- return x.PositiveDelta
- }
- return nil
-}
-
-func (x *Histogram) GetPositiveCount() []float64 {
- if x != nil {
- return x.PositiveCount
- }
- return nil
-}
-
-func (x *Histogram) GetExemplars() []*Exemplar {
- if x != nil {
- return x.Exemplars
- }
- return nil
-}
-
-// A Bucket of a conventional histogram, each of which is treated as
-// an individual counter-like time series by Prometheus.
-type Bucket struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
- CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
- Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
-}
-
-func (x *Bucket) Reset() {
- *x = Bucket{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Bucket) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Bucket) ProtoMessage() {}
-
-func (x *Bucket) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
-func (*Bucket) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *Bucket) GetCumulativeCount() uint64 {
- if x != nil && x.CumulativeCount != nil {
- return *x.CumulativeCount
- }
- return 0
-}
-
-func (x *Bucket) GetCumulativeCountFloat() float64 {
- if x != nil && x.CumulativeCountFloat != nil {
- return *x.CumulativeCountFloat
- }
- return 0
-}
-
-func (x *Bucket) GetUpperBound() float64 {
- if x != nil && x.UpperBound != nil {
- return *x.UpperBound
- }
- return 0
-}
-
-func (x *Bucket) GetExemplar() *Exemplar {
- if x != nil {
- return x.Exemplar
- }
- return nil
-}
-
-// A BucketSpan defines a number of consecutive buckets in a native
-// histogram with their offset. Logically, it would be more
-// straightforward to include the bucket counts in the Span. However,
-// the protobuf representation is more compact in the way the data is
-// structured here (with all the buckets in a single array separate
-// from the Spans).
-type BucketSpan struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
- Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
-}
-
-func (x *BucketSpan) Reset() {
- *x = BucketSpan{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BucketSpan) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BucketSpan) ProtoMessage() {}
-
-func (x *BucketSpan) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
-func (*BucketSpan) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *BucketSpan) GetOffset() int32 {
- if x != nil && x.Offset != nil {
- return *x.Offset
- }
- return 0
-}
-
-func (x *BucketSpan) GetLength() uint32 {
- if x != nil && x.Length != nil {
- return *x.Length
- }
- return 0
-}
-
-type Exemplar struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
-}
-
-func (x *Exemplar) Reset() {
- *x = Exemplar{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Exemplar) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Exemplar) ProtoMessage() {}
-
-func (x *Exemplar) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *Exemplar) GetLabel() []*LabelPair {
- if x != nil {
- return x.Label
- }
- return nil
-}
-
-func (x *Exemplar) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.Timestamp
- }
- return nil
-}
-
-type Metric struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
-}
-
-func (x *Metric) Reset() {
- *x = Metric{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Metric) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Metric) ProtoMessage() {}
-
-func (x *Metric) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
-func (*Metric) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *Metric) GetLabel() []*LabelPair {
- if x != nil {
- return x.Label
- }
- return nil
-}
-
-func (x *Metric) GetGauge() *Gauge {
- if x != nil {
- return x.Gauge
- }
- return nil
-}
-
-func (x *Metric) GetCounter() *Counter {
- if x != nil {
- return x.Counter
- }
- return nil
-}
-
-func (x *Metric) GetSummary() *Summary {
- if x != nil {
- return x.Summary
- }
- return nil
-}
-
-func (x *Metric) GetUntyped() *Untyped {
- if x != nil {
- return x.Untyped
- }
- return nil
-}
-
-func (x *Metric) GetHistogram() *Histogram {
- if x != nil {
- return x.Histogram
- }
- return nil
-}
-
-func (x *Metric) GetTimestampMs() int64 {
- if x != nil && x.TimestampMs != nil {
- return *x.TimestampMs
- }
- return 0
-}
-
-type MetricFamily struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
-}
-
-func (x *MetricFamily) Reset() {
- *x = MetricFamily{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MetricFamily) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MetricFamily) ProtoMessage() {}
-
-func (x *MetricFamily) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *MetricFamily) GetName() string {
- if x != nil && x.Name != nil {
- return *x.Name
- }
- return ""
-}
-
-func (x *MetricFamily) GetHelp() string {
- if x != nil && x.Help != nil {
- return *x.Help
- }
- return ""
-}
-
-func (x *MetricFamily) GetType() MetricType {
- if x != nil && x.Type != nil {
- return *x.Type
- }
- return MetricType_COUNTER
-}
-
-func (x *MetricFamily) GetMetric() []*Metric {
- if x != nil {
- return x.Metric
- }
- return nil
-}
-
-func (x *MetricFamily) GetUnit() string {
- if x != nil && x.Unit != nil {
- return *x.Unit
- }
- return ""
-}
-
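
The accessor style used throughout this generated file is proto2's nil-safe getter pattern: every scalar field is a pointer, and the getters return a zero value instead of dereferencing nil. A small sketch of consuming the model through those getters (assuming the usual dto import alias):

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
	}
	// GetHelp returns "" rather than panicking on the unset *string.
	fmt.Println(mf.GetName(), mf.GetHelp(), mf.GetType())
}
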
-var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
-
-var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
- 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
- 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
- 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
- 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
- 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
- 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
- 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
- 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
- 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
- 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
- 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
- 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
- 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
- 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
- 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
- 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
- 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
- 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
- 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
- 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
- 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
- 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
- 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
- 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
- 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
- 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
- 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
- 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
- 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
- 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
- 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
- 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
- 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
- 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
- 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
- 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
- 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
- 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
- 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
- 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
- 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
- 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
- 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
- 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
- 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
- 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
- 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
- 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
- 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
- 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
- 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
- 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
- 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
- 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
- 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
- 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
- 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
- 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
- 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
- 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
- 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
- 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
- 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
- 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
- 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
- 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
- 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
- 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
- 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
- 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
- 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
- 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
- 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
- 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
- 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
- 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
- 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
- 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
- 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
- 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
- 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
- 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
- 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
- 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
- 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
- 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
- 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
- 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
- 0x6c, 0x69, 0x65, 0x6e, 0x74,
-}
-
-var (
- file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
- file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
-)
-
-func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
- file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
- file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
- })
- return file_io_prometheus_client_metrics_proto_rawDescData
-}
-
-var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
-var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
- (MetricType)(0), // 0: io.prometheus.client.MetricType
- (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
- (*Gauge)(nil), // 2: io.prometheus.client.Gauge
- (*Counter)(nil), // 3: io.prometheus.client.Counter
- (*Quantile)(nil), // 4: io.prometheus.client.Quantile
- (*Summary)(nil), // 5: io.prometheus.client.Summary
- (*Untyped)(nil), // 6: io.prometheus.client.Untyped
- (*Histogram)(nil), // 7: io.prometheus.client.Histogram
- (*Bucket)(nil), // 8: io.prometheus.client.Bucket
- (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
- (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
- (*Metric)(nil), // 11: io.prometheus.client.Metric
- (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
- (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
-}
-var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
- 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
- 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
- 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
- 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
- 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
- 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
- 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
- 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
- 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
- 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
- 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
- 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
- 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
- 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
- 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
- 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
- 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
- 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
- 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
- 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
- 20, // [20:20] is the sub-list for method output_type
- 20, // [20:20] is the sub-list for method input_type
- 20, // [20:20] is the sub-list for extension type_name
- 20, // [20:20] is the sub-list for extension extendee
- 0, // [0:20] is the sub-list for field type_name
-}
-
-func init() { file_io_prometheus_client_metrics_proto_init() }
-func file_io_prometheus_client_metrics_proto_init() {
- if File_io_prometheus_client_metrics_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LabelPair); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Gauge); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Counter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Quantile); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Summary); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Untyped); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Histogram); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BucketSpan); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Exemplar); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Metric); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MetricFamily); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 12,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
- DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
- EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
- MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
- }.Build()
- File_io_prometheus_client_metrics_proto = out.File
- file_io_prometheus_client_metrics_proto_rawDesc = nil
- file_io_prometheus_client_metrics_proto_goTypes = nil
- file_io_prometheus_client_metrics_proto_depIdxs = nil
-}
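The deleted file above is generated protobuf registration code for the io.prometheus.client descriptor. For orientation, a minimal sketch (assumed usage, not part of the vendored sources) of constructing those generated types by hand, using the conventional dto import alias:

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	// One counter family with a single labeled series, mirroring the
	// message layout encoded in the raw descriptor bytes above.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(1027)},
		}},
	}
	fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
}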
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
deleted file mode 100644
index 636a2c1a5..000000000
--- a/vendor/github.com/prometheus/common/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Common libraries shared by Prometheus Go components.
-Copyright 2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
deleted file mode 100644
index 1448439b7..000000000
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "fmt"
- "io"
- "math"
- "mime"
- "net/http"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/encoding/protodelim"
-
- "github.com/prometheus/common/model"
-)
-
-// Decoder types decode an input stream into metric families.
-type Decoder interface {
- Decode(*dto.MetricFamily) error
-}
-
-// DecodeOptions contains options used by the Decoder and in sample extraction.
-type DecodeOptions struct {
- // Timestamp is added to each value from the stream that has no explicit timestamp set.
- Timestamp model.Time
-}
-
-// ResponseFormat extracts the correct format from an HTTP response header.
-// If no matching format can be found, FmtUnknown is returned.
-func ResponseFormat(h http.Header) Format {
- ct := h.Get(hdrContentType)
-
- mediatype, params, err := mime.ParseMediaType(ct)
- if err != nil {
- return FmtUnknown
- }
-
- const textType = "text/plain"
-
- switch mediatype {
- case ProtoType:
- if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return FmtUnknown
- }
- if e, ok := params["encoding"]; ok && e != "delimited" {
- return FmtUnknown
- }
- return FmtProtoDelim
-
- case textType:
- if v, ok := params["version"]; ok && v != TextVersion {
- return FmtUnknown
- }
- return FmtText
- }
-
- return FmtUnknown
-}
-
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
-func NewDecoder(r io.Reader, format Format) Decoder {
- switch format.FormatType() {
- case TypeProtoDelim:
- return &protoDecoder{r: bufio.NewReader(r)}
- }
- return &textDecoder{r: r}
-}
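NewDecoder together with the io.EOF convention of textDecoder suggests the following read loop; this is a sketch of assumed usage, not code from the vendored file:

package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := "# TYPE up gauge\nup 1\n"
	dec := expfmt.NewDecoder(strings.NewReader(in), expfmt.FmtText)
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			if err == io.EOF {
				break // all families consumed
			}
			panic(err)
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}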
-
-// protoDecoder implements the Decoder interface for protocol buffers.
-type protoDecoder struct {
- r protodelim.Reader
-}
-
-// Decode implements the Decoder interface.
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- opts := protodelim.UnmarshalOptions{
- MaxSize: -1,
- }
- if err := opts.UnmarshalFrom(d.r, v); err != nil {
- return err
- }
- if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
- return fmt.Errorf("invalid metric name %q", v.GetName())
- }
- for _, m := range v.GetMetric() {
- if m == nil {
- continue
- }
- for _, l := range m.GetLabel() {
- if l == nil {
- continue
- }
- if !model.LabelValue(l.GetValue()).IsValid() {
- return fmt.Errorf("invalid label value %q", l.GetValue())
- }
- if !model.LabelName(l.GetName()).IsValid() {
- return fmt.Errorf("invalid label name %q", l.GetName())
- }
- }
- }
- return nil
-}
-
-// textDecoder implements the Decoder interface for the text protocol.
-type textDecoder struct {
- r io.Reader
- fams map[string]*dto.MetricFamily
- err error
-}
-
-// Decode implements the Decoder interface.
-func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- if d.err == nil {
- // Read all metrics in one shot.
- var p TextParser
- d.fams, d.err = p.TextToMetricFamilies(d.r)
- // If we don't get an error, store io.EOF for the end.
- if d.err == nil {
- d.err = io.EOF
- }
- }
- // Pick off one MetricFamily per Decode until there's nothing left.
- for key, fam := range d.fams {
- v.Name = fam.Name
- v.Help = fam.Help
- v.Type = fam.Type
- v.Metric = fam.Metric
- delete(d.fams, key)
- return nil
- }
- return d.err
-}
-
-// SampleDecoder wraps a Decoder to extract samples from the metric families
-// decoded by the wrapped Decoder.
-type SampleDecoder struct {
- Dec Decoder
- Opts *DecodeOptions
-
- f dto.MetricFamily
-}
-
-// Decode calls the Decode method of the wrapped Decoder and then extracts the
-// samples from the decoded MetricFamily into the provided model.Vector.
-func (sd *SampleDecoder) Decode(s *model.Vector) error {
- err := sd.Dec.Decode(&sd.f)
- if err != nil {
- return err
- }
- *s, err = extractSamples(&sd.f, sd.Opts)
- return err
-}
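A corresponding sketch for SampleDecoder (assumed usage): each Decode call replaces the vector with the samples of one family, stamping values that carry no explicit timestamp with the configured default:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	sd := &expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(strings.NewReader("up 1\n"), expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}
	var v model.Vector
	for {
		if err := sd.Decode(&v); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Println(v) // up => 1 @ decode time
	}
}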
-
-// ExtractSamples builds a slice of samples from the provided metric
-// families. If an error occurs during sample extraction, it continues to
-// extract from the remaining metric families. The returned error is the last
-// error that has occurred.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
- var (
- all model.Vector
- lastErr error
- )
- for _, f := range fams {
- some, err := extractSamples(f, o)
- if err != nil {
- lastErr = err
- continue
- }
- all = append(all, some...)
- }
- return all, lastErr
-}
-
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
- switch f.GetType() {
- case dto.MetricType_COUNTER:
- return extractCounter(o, f), nil
- case dto.MetricType_GAUGE:
- return extractGauge(o, f), nil
- case dto.MetricType_SUMMARY:
- return extractSummary(o, f), nil
- case dto.MetricType_UNTYPED:
- return extractUntyped(o, f), nil
- case dto.MetricType_HISTOGRAM:
- return extractHistogram(o, f), nil
- }
- return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
-}
-
-func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Counter == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Counter.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Gauge == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Gauge.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Untyped == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Untyped.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Summary == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- for _, q := range m.Summary.Quantile {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- // BUG(matt): Update other names to "quantile".
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetValue()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleCount()),
- Timestamp: timestamp,
- })
- }
-
- return samples
-}
-
-func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Histogram == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- infSeen := false
-
- for _, q := range m.Histogram.Bucket {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- if math.IsInf(q.GetUpperBound(), +1) {
- infSeen = true
- }
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetCumulativeCount()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- count := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleCount()),
- Timestamp: timestamp,
- }
- samples = append(samples, count)
-
- if !infSeen {
- // Append an infinity bucket sample.
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: count.Value,
- Timestamp: timestamp,
- })
- }
- }
-
- return samples
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
deleted file mode 100644
index d7f3d76f5..000000000
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "fmt"
- "io"
- "net/http"
-
- "google.golang.org/protobuf/encoding/protodelim"
- "google.golang.org/protobuf/encoding/prototext"
-
- "github.com/prometheus/common/model"
-
- "github.com/munnerz/goautoneg"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Encoder types encode metric families into an underlying wire protocol.
-type Encoder interface {
- Encode(*dto.MetricFamily) error
-}
-
-// Closer is implemented by Encoders that need to be closed to finalize
-// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
-//
-// Note that all Encoder implementations returned from this package implement
-// Closer, too, even if the Close call is a no-op. This happens in preparation
-// for adding a Close method to the Encoder interface directly in a (mildly
-// breaking) release in the future.
-type Closer interface {
- Close() error
-}
-
-type encoderCloser struct {
- encode func(*dto.MetricFamily) error
- close func() error
-}
-
-func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
- return ec.encode(v)
-}
-
-func (ec encoderCloser) Close() error {
- return ec.close()
-}
-
-// Negotiate returns the Content-Type based on the given Accept header. If no
-// appropriate accepted type is found, FmtText is returned (which is the
-// Prometheus text format). This function will never negotiate FmtOpenMetrics,
-// as the support is still experimental. To include the option to negotiate
-// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
-func Negotiate(h http.Header) Format {
- escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
- for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
- if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
- switch Format(escapeParam) {
- case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format("; escaping=" + escapeParam)
- default:
- // If the escaping parameter is unknown, ignore it.
- }
- }
- ver := ac.Params["version"]
- if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
- switch ac.Params["encoding"] {
- case "delimited":
- return FmtProtoDelim + escapingScheme
- case "text":
- return FmtProtoText + escapingScheme
- case "compact-text":
- return FmtProtoCompact + escapingScheme
- }
- }
- if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText + escapingScheme
- }
- }
- return FmtText + escapingScheme
-}
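For illustration, a server handling a scrape might negotiate as below (a sketch of assumed usage; since the returned Format carries an escaping term, it is compared by FormatType rather than by string):

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}
	h.Set("Accept",
		"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited")
	format := expfmt.Negotiate(h)
	fmt.Println(format.FormatType() == expfmt.TypeProtoDelim) // true
}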
-
-// NegotiateIncludingOpenMetrics works like Negotiate but includes
-// FmtOpenMetrics as an option for the result. Note that this function is
-// temporary and will disappear once FmtOpenMetrics is fully supported, at
-// which point it can be negotiated by the normal Negotiate function.
-func NegotiateIncludingOpenMetrics(h http.Header) Format {
- escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
- for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
- if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
- switch Format(escapeParam) {
- case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format("; escaping=" + escapeParam)
- default:
- // If the escaping parameter is unknown, ignore it.
- }
- }
- ver := ac.Params["version"]
- if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
- switch ac.Params["encoding"] {
- case "delimited":
- return FmtProtoDelim + escapingScheme
- case "text":
- return FmtProtoText + escapingScheme
- case "compact-text":
- return FmtProtoCompact + escapingScheme
- }
- }
- if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText + escapingScheme
- }
- if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
- switch ver {
- case OpenMetricsVersion_1_0_0:
- return FmtOpenMetrics_1_0_0 + escapingScheme
- default:
- return FmtOpenMetrics_0_0_1 + escapingScheme
- }
- }
- }
- return FmtText + escapingScheme
-}
-
-// NewEncoder returns a new encoder based on content type negotiation. All
-// Encoder implementations returned by NewEncoder also implement Closer, and
-// callers should always call the Close method. It is currently only required
-// for FmtOpenMetrics, but a future (breaking) release will add the Close method
-// to the Encoder interface directly. The current version of the Encoder
-// interface is kept for backwards compatibility.
-// In cases where the Format does not allow for UTF-8 names, the global
-// NameEscapingScheme will be applied.
-//
-// NewEncoder can be called with additional options to customize the OpenMetrics text output.
-// For example:
-// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
-//
-// Extra options are ignored for all other formats.
-func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
- escapingScheme := format.ToEscapingScheme()
-
- switch format.FormatType() {
- case TypeProtoDelim:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := protodelim.MarshalTo(w, v)
- return err
- },
- close: func() error { return nil },
- }
- case TypeProtoCompact:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
- return err
- },
- close: func() error { return nil },
- }
- case TypeProtoText:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
- return err
- },
- close: func() error { return nil },
- }
- case TypeTextPlain:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
- return err
- },
- close: func() error { return nil },
- }
- case TypeOpenMetrics:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
- return err
- },
- close: func() error {
- _, err := FinalizeOpenMetrics(w)
- return err
- },
- }
- }
- panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
-}
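The intended encode/close cycle, as a minimal sketch of assumed usage:

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("up"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtOpenMetrics_1_0_0)
	if err := enc.Encode(mf); err != nil {
		panic(err)
	}
	// All encoders returned by NewEncoder implement Closer; Close emits
	// the final "# EOF" line for OpenMetrics and is a no-op otherwise.
	if c, ok := enc.(expfmt.Closer); ok {
		if err := c.Close(); err != nil {
			panic(err)
		}
	}
}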
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
deleted file mode 100644
index b26886560..000000000
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package expfmt contains tools for reading and writing Prometheus metrics.
-package expfmt
-
-import (
- "errors"
- "strings"
-
- "github.com/prometheus/common/model"
-)
-
-// Format specifies the HTTP content type of the different wire protocols.
-type Format string
-
-// Constants to assemble the Content-Type values for the different wire
-// protocols. The Content-Type strings here are all for the legacy exposition
-// formats, where valid characters for metric names and label names are limited.
-// Support for arbitrary UTF-8 characters in those names is already partially
-// implemented in this module (see model.ValidationScheme), but to actually use
-// it on the wire, new content-type strings will have to be agreed upon and
-// added here.
-const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
- OpenMetricsVersion_0_0_1 = "0.0.1"
- OpenMetricsVersion_1_0_0 = "1.0.0"
-
- // The Content-Type values for the different wire protocols. Do not do direct
- // comparisons to these constants, instead use the comparison functions.
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
- FmtUnknown Format = ``
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
- FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
- FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
- FmtProtoText Format = ProtoFmt + ` encoding=text`
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
- FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
- FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
- // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
- FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
-)
-
-const (
- hdrContentType = "Content-Type"
- hdrAccept = "Accept"
-)
-
-// FormatType is a Go enum representing the overall category for the given
-// Format. As the number of Format permutations increases, basic string
-// comparisons become infeasible, so this enum captures the most useful
-// high-level attribute of the Format string.
-type FormatType int
-
-const (
- TypeUnknown FormatType = iota
- TypeProtoCompact
- TypeProtoDelim
- TypeProtoText
- TypeTextPlain
- TypeOpenMetrics
-)
-
-// NewFormat generates a new Format from the type provided. It is mostly used
-// in tests; most Formats should be generated as part of content negotiation in
-// encode.go. If a type has more than one version, the latest version will be
-// returned.
-func NewFormat(t FormatType) Format {
- switch t {
- case TypeProtoCompact:
- return FmtProtoCompact
- case TypeProtoDelim:
- return FmtProtoDelim
- case TypeProtoText:
- return FmtProtoText
- case TypeTextPlain:
- return FmtText
- case TypeOpenMetrics:
- return FmtOpenMetrics_1_0_0
- default:
- return FmtUnknown
- }
-}
-
-// NewOpenMetricsFormat generates a new OpenMetrics format matching the
-// specified version number.
-func NewOpenMetricsFormat(version string) (Format, error) {
- if version == OpenMetricsVersion_0_0_1 {
- return FmtOpenMetrics_0_0_1, nil
- }
- if version == OpenMetricsVersion_1_0_0 {
- return FmtOpenMetrics_1_0_0, nil
- }
- return FmtUnknown, errors.New("unknown open metrics version string")
-}
-
-// WithEscapingScheme returns a copy of Format with the specified escaping
-// scheme appended to the end. If an escaping scheme already exists it is
-// removed.
-func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
- var terms []string
- for _, p := range strings.Split(string(f), ";") {
- toks := strings.Split(p, "=")
- if len(toks) != 2 {
- trimmed := strings.TrimSpace(p)
- if len(trimmed) > 0 {
- terms = append(terms, trimmed)
- }
- continue
- }
- key := strings.TrimSpace(toks[0])
- if key != model.EscapingKey {
- terms = append(terms, strings.TrimSpace(p))
- }
- }
- terms = append(terms, model.EscapingKey+"="+s.String())
- return Format(strings.Join(terms, "; "))
-}
-
-// FormatType deduces an overall FormatType for the given format.
-func (f Format) FormatType() FormatType {
- toks := strings.Split(string(f), ";")
- params := make(map[string]string)
- for i, t := range toks {
- if i == 0 {
- continue
- }
- args := strings.Split(t, "=")
- if len(args) != 2 {
- continue
- }
- params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
- }
-
- switch strings.TrimSpace(toks[0]) {
- case ProtoType:
- if params["proto"] != ProtoProtocol {
- return TypeUnknown
- }
- switch params["encoding"] {
- case "delimited":
- return TypeProtoDelim
- case "text":
- return TypeProtoText
- case "compact-text":
- return TypeProtoCompact
- default:
- return TypeUnknown
- }
- case OpenMetricsType:
- if params["charset"] != "utf-8" {
- return TypeUnknown
- }
- return TypeOpenMetrics
- case "text/plain":
- v, ok := params["version"]
- if !ok {
- return TypeTextPlain
- }
- if v == TextVersion {
- return TypeTextPlain
- }
- return TypeUnknown
- default:
- return TypeUnknown
- }
-}
-
-// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
-// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
-// "escaping" term exists, that will be used. Otherwise, the global default will
-// be returned.
-func (format Format) ToEscapingScheme() model.EscapingScheme {
- for _, p := range strings.Split(string(format), ";") {
- toks := strings.Split(p, "=")
- if len(toks) != 2 {
- continue
- }
- key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
- if key == model.EscapingKey {
- scheme, err := model.ToEscapingScheme(value)
- if err != nil {
- return model.NameEscapingScheme
- }
- return scheme
- }
- }
- return model.NameEscapingScheme
-}
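A short sketch exercising the Format helpers defined in this file (assumed usage):

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics)          // latest version, 1.0.0
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics) // true

	// WithEscapingScheme replaces any existing escaping term.
	g := f.WithEscapingScheme(model.NoEscaping)
	fmt.Println(g.ToEscapingScheme() == model.NoEscaping) // true
}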
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
deleted file mode 100644
index dfac962a4..000000000
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Build only when actually fuzzing
-//go:build gofuzz
-// +build gofuzz
-
-package expfmt
-
-import "bytes"
-
-// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
-//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
-//
-// Further input samples should go in the folder fuzz/corpus.
-func Fuzz(in []byte) int {
- parser := TextParser{}
- _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
- if err != nil {
- return 0
- }
-
- return 1
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
deleted file mode 100644
index a21ed4ec1..000000000
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ /dev/null
@@ -1,696 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
-
- "google.golang.org/protobuf/types/known/timestamppb"
-
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-type encoderOption struct {
- withCreatedLines bool
- withUnit bool
-}
-
-type EncoderOption func(*encoderOption)
-
-// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
-// to include _created lines (See
-// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
-// Created timestamps can improve the accuracy of series reset detection, but
-// come with a bandwidth cost.
-//
-// At the time of writing, created timestamp ingestion is still experimental in
-// Prometheus and needs to be enabled with the feature flag
-// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
-// still possible. Therefore, it is recommended to use this feature with caution.
-func WithCreatedLines() EncoderOption {
- return func(t *encoderOption) {
- t.withCreatedLines = true
- }
-}
-
-// WithUnit is an EncoderOption enabling a set unit to be written to the output
-// and appended to the metric name as a suffix, if it is not there already.
-// Without opting in this way, the unit is neither added to the metric name nor
-// passed on to the output, even if it is declared in the *dto.MetricFamily
-// struct, i.e. even if in.Unit != nil.
-func WithUnit() EncoderOption {
- return func(t *encoderOption) {
- t.withUnit = true
- }
-}
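Both options can be combined when calling the converter directly; a sketch of assumed usage (in normal operation NewEncoder wires this up):

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("request_duration_seconds"),
		Type: dto.MetricType_GAUGE.Enum(),
		Unit: proto.String("seconds"),
		Metric: []*dto.Metric{{
			Gauge: &dto.Gauge{Value: proto.Float64(0.25)},
		}},
	}
	// WithUnit emits the "# UNIT" line; WithCreatedLines would add
	// _created series for counters, summaries, and histograms.
	if _, err := expfmt.MetricFamilyToOpenMetrics(
		os.Stdout, mf, expfmt.WithCreatedLines(), expfmt.WithUnit(),
	); err != nil {
		panic(err)
	}
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		panic(err)
	}
}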
-
-// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
-// OpenMetrics text format and writes the resulting lines to 'out'. It returns
-// the number of bytes written and any error encountered. The output will have
-// the same order as the input, no further sorting is performed. Furthermore,
-// this function assumes the input is already sanitized and does not perform any
-// sanity checks. If the input contains duplicate metrics or invalid metric or
-// label names, the conversion will result in invalid text format output.
-//
-// If metric names conform to the legacy validation pattern, they will be placed
-// outside the brackets in the traditional way, like `foo{}`. If the metric name
-// fails the legacy validation check, it will be placed quoted inside the
-// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
-// no error will be thrown in this case.
-//
-// Similar to metric names, if label names conform to the legacy validation
-// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
-// name fails the legacy validation check, it will be quoted:
-// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
-// no error will be thrown in this case.
-//
-// This function fulfills the type 'expfmt.encoder'.
-//
-// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
-// on individual metric families, it is the responsibility of the caller to
-// append this line to 'out' once all metric families have been written.
-// Conveniently, this can be done by calling FinalizeOpenMetrics.
-//
-// The output should be fully OpenMetrics compliant. However, there are a few
-// missing features and peculiarities to avoid complications when switching from
-// Prometheus to OpenMetrics or vice versa:
-//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
-// lines. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
-//
-// - According to the OM specs, the `# UNIT` line is optional, but if populated,
-// the unit has to be present in the metric name as its suffix:
-// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
-//   However, to accommodate cases where such a change to the metric name is
-//   not desirable, users are given a choice: they can opt in explicitly if
-//   they want the unit included in the output AND appended to the metric name
-//   as a suffix (see the description of the WithUnit function above), or they
-//   can skip opting in if they want neither of those changes.
-//
-// - No support for the following (optional) features: info type,
-// stateset type, gaugehistogram type.
-//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
-//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
- toOM := encoderOption{}
- for _, option := range options {
- option(&toOM)
- }
-
- name := in.GetName()
- if name == "" {
- return 0, fmt.Errorf("MetricFamily has no name: %s", in)
- }
-
- // Try the interface upgrade. If it doesn't work, we'll use a
- // bufio.Writer from the sync.Pool.
- w, ok := out.(enhancedWriter)
- if !ok {
- b := bufPool.Get().(*bufio.Writer)
- b.Reset(out)
- w = b
- defer func() {
- bErr := b.Flush()
- if err == nil {
- err = bErr
- }
- bufPool.Put(b)
- }()
- }
-
- var (
- n int
- metricType = in.GetType()
- compliantName = name
- )
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
- compliantName = name[:len(name)-6]
- }
- if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
- compliantName = compliantName + "_" + *in.Unit
- }
-
- // Comments, first HELP, then TYPE.
- if in.Help != nil {
- n, err = w.WriteString("# HELP ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, compliantName)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Help, true)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
- n, err = w.WriteString("# TYPE ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, compliantName)
- written += n
- if err != nil {
- return
- }
- switch metricType {
- case dto.MetricType_COUNTER:
- if strings.HasSuffix(name, "_total") {
- n, err = w.WriteString(" counter\n")
- } else {
- n, err = w.WriteString(" unknown\n")
- }
- case dto.MetricType_GAUGE:
- n, err = w.WriteString(" gauge\n")
- case dto.MetricType_SUMMARY:
- n, err = w.WriteString(" summary\n")
- case dto.MetricType_UNTYPED:
- n, err = w.WriteString(" unknown\n")
- case dto.MetricType_HISTOGRAM:
- n, err = w.WriteString(" histogram\n")
- default:
- return written, fmt.Errorf("unknown metric type %s", metricType.String())
- }
- written += n
- if err != nil {
- return
- }
- if toOM.withUnit && in.Unit != nil {
- n, err = w.WriteString("# UNIT ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, compliantName)
- written += n
- if err != nil {
- return
- }
-
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Unit, true)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
-
- var createdTsBytesWritten int
-
- // Finally the samples, one line for each.
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
- compliantName = compliantName + "_total"
- }
- for _, metric := range in.Metric {
- switch metricType {
- case dto.MetricType_COUNTER:
- if metric.Counter == nil {
- return written, fmt.Errorf(
- "expected counter in metric %s %s", compliantName, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric, "", 0,
- metric.Counter.GetValue(), 0, false,
- metric.Counter.Exemplar,
- )
- if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
- createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
- n += createdTsBytesWritten
- }
- case dto.MetricType_GAUGE:
- if metric.Gauge == nil {
- return written, fmt.Errorf(
- "expected gauge in metric %s %s", compliantName, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric, "", 0,
- metric.Gauge.GetValue(), 0, false,
- nil,
- )
- case dto.MetricType_UNTYPED:
- if metric.Untyped == nil {
- return written, fmt.Errorf(
- "expected untyped in metric %s %s", compliantName, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric, "", 0,
- metric.Untyped.GetValue(), 0, false,
- nil,
- )
- case dto.MetricType_SUMMARY:
- if metric.Summary == nil {
- return written, fmt.Errorf(
- "expected summary in metric %s %s", compliantName, metric,
- )
- }
- for _, q := range metric.Summary.Quantile {
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric,
- model.QuantileLabel, q.GetQuantile(),
- q.GetValue(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_sum", metric, "", 0,
- metric.Summary.GetSampleSum(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_count", metric, "", 0,
- 0, metric.Summary.GetSampleCount(), true,
- nil,
- )
- if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
- createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
- n += createdTsBytesWritten
- }
- case dto.MetricType_HISTOGRAM:
- if metric.Histogram == nil {
- return written, fmt.Errorf(
- "expected histogram in metric %s %s", compliantName, metric,
- )
- }
- infSeen := false
- for _, b := range metric.Histogram.Bucket {
- n, err = writeOpenMetricsSample(
- w, compliantName, "_bucket", metric,
- model.BucketLabel, b.GetUpperBound(),
- 0, b.GetCumulativeCount(), true,
- b.Exemplar,
- )
- written += n
- if err != nil {
- return
- }
- if math.IsInf(b.GetUpperBound(), +1) {
- infSeen = true
- }
- }
- if !infSeen {
- n, err = writeOpenMetricsSample(
- w, compliantName, "_bucket", metric,
- model.BucketLabel, math.Inf(+1),
- 0, metric.Histogram.GetSampleCount(), true,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_sum", metric, "", 0,
- metric.Histogram.GetSampleSum(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_count", metric, "", 0,
- 0, metric.Histogram.GetSampleCount(), true,
- nil,
- )
- if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
- createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
- n += createdTsBytesWritten
- }
- default:
- return written, fmt.Errorf(
- "unexpected type in metric %s %s", compliantName, metric,
- )
- }
- written += n
- if err != nil {
- return
- }
- }
- return
-}
-
-// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
-func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
- return w.Write([]byte("# EOF\n"))
-}
-
-// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
-// w, given the metric name, the metric proto message itself, optionally an
-// additional label name with a float64 value (use empty string as label name if
-// not required), the value (optionally as float64 or uint64, determined by
-// useIntValue), and optionally an exemplar (use nil if not required). The
-// function returns the number of bytes written and any error encountered.
-func writeOpenMetricsSample(
- w enhancedWriter,
- name, suffix string,
- metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- floatValue float64, intValue uint64, useIntValue bool,
- exemplar *dto.Exemplar,
-) (int, error) {
- written := 0
- n, err := writeOpenMetricsNameAndLabelPairs(
- w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- if useIntValue {
- n, err = writeUint(w, intValue)
- } else {
- n, err = writeOpenMetricsFloat(w, floatValue)
- }
- written += n
- if err != nil {
- return written, err
- }
- if metric.TimestampMs != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- // TODO(beorn7): Format this directly without converting to a float first.
- n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
- written += n
- if err != nil {
- return written, err
- }
- }
- if exemplar != nil && len(exemplar.Label) > 0 {
- n, err = writeExemplar(w, exemplar)
- written += n
- if err != nil {
- return written, err
- }
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeOpenMetricsNameAndLabelPairs writes the given metric name and label
-// pairs, formatting the additional label value as an OpenMetrics float and
-// quoting the metric name inside the braces if it fails the legacy check.
-func writeOpenMetricsNameAndLabelPairs(
- w enhancedWriter,
- name string,
- in []*dto.LabelPair,
- additionalLabelName string, additionalLabelValue float64,
-) (int, error) {
- var (
- written int
- separator byte = '{'
- metricInsideBraces = false
- )
-
- if name != "" {
- // If the name does not pass the legacy validity check, we must put the
- // metric name inside the braces, quoted.
- if !model.IsValidLegacyMetricName(name) {
- metricInsideBraces = true
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
-
- n, err := writeName(w, name)
- written += n
- if err != nil {
- return written, err
- }
- }
-
- if len(in) == 0 && additionalLabelName == "" {
- if metricInsideBraces {
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- }
- return written, nil
- }
-
- for _, lp := range in {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := writeName(w, lp.GetName())
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeEscapedString(w, lp.GetValue(), true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- if additionalLabelName != "" {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(additionalLabelName)
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsFloat(w, additionalLabelValue)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- }
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeOpenMetricsCreated writes the created timestamp for a single time series
-// following OpenMetrics text format to w, given the metric name, the metric proto
-// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
-// an additional label name with a float64 value (use empty string as label name if
-// not required) and the timestamp that represents the created timestamp.
-// The function returns the number of bytes written and any error encountered.
-func writeOpenMetricsCreated(w enhancedWriter,
- name, suffixToTrim string, metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- createdTimestamp *timestamppb.Timestamp,
-) (int, error) {
- written := 0
- n, err := writeOpenMetricsNameAndLabelPairs(
- w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
-
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
-
- // TODO(beorn7): Format this directly from components of ts to
- // avoid overflow/underflow and precision issues of the float
- // conversion.
- n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
- written += n
- if err != nil {
- return written, err
- }
-
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
-// function returns the number of bytes written and any error encountered.
-func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
- written := 0
- n, err := w.WriteString(" # ")
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsFloat(w, e.GetValue())
- written += n
- if err != nil {
- return written, err
- }
- if e.Timestamp != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- err = e.Timestamp.CheckValid()
- if err != nil {
- return written, err
- }
- ts := e.Timestamp.AsTime()
- // TODO(beorn7): Format this directly from components of ts to
- // avoid overflow/underflow and precision issues of the float
- // conversion.
- n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
- written += n
- if err != nil {
- return written, err
- }
- }
- return written, nil
-}
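-
-// For illustration, the exemplar written by writeExemplar is appended to a
-// sample line, yielding output like the following (all values assumed):
-//
-//	requests_bucket{le="0.5"} 4.0 # {trace_id="abc123"} 0.23 1.6725312e+09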
-
-// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
-// number would otherwise contain neither a "." nor an "e".
-func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
- switch {
- case f == 1:
- return w.WriteString("1.0")
- case f == 0:
- return w.WriteString("0.0")
- case f == -1:
- return w.WriteString("-1.0")
- case math.IsNaN(f):
- return w.WriteString("NaN")
- case math.IsInf(f, +1):
- return w.WriteString("+Inf")
- case math.IsInf(f, -1):
- return w.WriteString("-Inf")
- default:
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
- if !bytes.ContainsAny(*bp, "e.") {
- *bp = append(*bp, '.', '0')
- }
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
- }
-}
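-
-// For example, following the rules above:
-//
-//	writeOpenMetricsFloat(w, 1)           // writes "1.0"
-//	writeOpenMetricsFloat(w, 2.5)         // writes "2.5" (already contains '.')
-//	writeOpenMetricsFloat(w, 1e21)        // writes "1e+21" (contains 'e')
-//	writeOpenMetricsFloat(w, math.Inf(1)) // writes "+Inf"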
-
-// writeUint is like writeInt, but for uint64.
-func writeUint(w enhancedWriter, u uint64) (int, error) {
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendUint((*bp)[:0], u, 10)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
deleted file mode 100644
index 4b86434b3..000000000
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
- "sync"
-
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
-// implements it.
-type enhancedWriter interface {
- io.Writer
- WriteRune(r rune) (n int, err error)
- WriteString(s string) (n int, err error)
- WriteByte(c byte) error
-}
-
-const (
- initialNumBufSize = 24
-)
-
-var (
- bufPool = sync.Pool{
- New: func() interface{} {
- return bufio.NewWriter(io.Discard)
- },
- }
- numBufPool = sync.Pool{
- New: func() interface{} {
- b := make([]byte, 0, initialNumBufSize)
- return &b
- },
- }
-)
-
-// MetricFamilyToText converts a MetricFamily proto message into text format and
-// writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered. The output will have the same order as the input,
-// no further sorting is performed. Furthermore, this function assumes the input
-// is already sanitized and does not perform any sanity checks. If the input
-// contains duplicate metrics or invalid metric or label names, the conversion
-// will result in invalid text format output.
-//
-// If metric names conform to the legacy validation pattern, they will be placed
-// outside the brackets in the traditional way, like `foo{}`. If the metric name
-// fails the legacy validation check, it will be placed quoted inside the
-// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized
-// and no error will be returned in this case.
-//
-// Similar to metric names, if label names conform to the legacy validation
-// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
-// name fails the legacy validation check, it will be quoted:
-// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
-// no error will be thrown in this case.
-//
-// This method fulfills the type 'prometheus.encoder'.
-func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
- // Fail-fast checks.
- if len(in.Metric) == 0 {
- return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
- }
- name := in.GetName()
- if name == "" {
- return 0, fmt.Errorf("MetricFamily has no name: %s", in)
- }
-
- // Try the interface upgrade. If it doesn't work, we'll use a
- // bufio.Writer from the sync.Pool.
- w, ok := out.(enhancedWriter)
- if !ok {
- b := bufPool.Get().(*bufio.Writer)
- b.Reset(out)
- w = b
- defer func() {
- bErr := b.Flush()
- if err == nil {
- err = bErr
- }
- bufPool.Put(b)
- }()
- }
-
- var n int
-
- // Comments, first HELP, then TYPE.
- if in.Help != nil {
- n, err = w.WriteString("# HELP ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, name)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Help, false)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
- n, err = w.WriteString("# TYPE ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, name)
- written += n
- if err != nil {
- return
- }
- metricType := in.GetType()
- switch metricType {
- case dto.MetricType_COUNTER:
- n, err = w.WriteString(" counter\n")
- case dto.MetricType_GAUGE:
- n, err = w.WriteString(" gauge\n")
- case dto.MetricType_SUMMARY:
- n, err = w.WriteString(" summary\n")
- case dto.MetricType_UNTYPED:
- n, err = w.WriteString(" untyped\n")
- case dto.MetricType_HISTOGRAM:
- n, err = w.WriteString(" histogram\n")
- default:
- return written, fmt.Errorf("unknown metric type %s", metricType.String())
- }
- written += n
- if err != nil {
- return
- }
-
- // Finally the samples, one line for each.
- for _, metric := range in.Metric {
- switch metricType {
- case dto.MetricType_COUNTER:
- if metric.Counter == nil {
- return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Counter.GetValue(),
- )
- case dto.MetricType_GAUGE:
- if metric.Gauge == nil {
- return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Gauge.GetValue(),
- )
- case dto.MetricType_UNTYPED:
- if metric.Untyped == nil {
- return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Untyped.GetValue(),
- )
- case dto.MetricType_SUMMARY:
- if metric.Summary == nil {
- return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
- )
- }
- for _, q := range metric.Summary.Quantile {
- n, err = writeSample(
- w, name, "", metric,
- model.QuantileLabel, q.GetQuantile(),
- q.GetValue(),
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeSample(
- w, name, "_sum", metric, "", 0,
- metric.Summary.GetSampleSum(),
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeSample(
- w, name, "_count", metric, "", 0,
- float64(metric.Summary.GetSampleCount()),
- )
- case dto.MetricType_HISTOGRAM:
- if metric.Histogram == nil {
- return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
- )
- }
- infSeen := false
- for _, b := range metric.Histogram.Bucket {
- n, err = writeSample(
- w, name, "_bucket", metric,
- model.BucketLabel, b.GetUpperBound(),
- float64(b.GetCumulativeCount()),
- )
- written += n
- if err != nil {
- return
- }
- if math.IsInf(b.GetUpperBound(), +1) {
- infSeen = true
- }
- }
- if !infSeen {
- n, err = writeSample(
- w, name, "_bucket", metric,
- model.BucketLabel, math.Inf(+1),
- float64(metric.Histogram.GetSampleCount()),
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeSample(
- w, name, "_sum", metric, "", 0,
- metric.Histogram.GetSampleSum(),
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeSample(
- w, name, "_count", metric, "", 0,
- float64(metric.Histogram.GetSampleCount()),
- )
- default:
- return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
- )
- }
- written += n
- if err != nil {
- return
- }
- }
- return
-}
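-
-// A minimal usage sketch, assuming a populated *dto.MetricFamily mf:
-//
-//	var buf bytes.Buffer
-//	if _, err := MetricFamilyToText(&buf, mf); err != nil {
-//		// handle err
-//	}
-//	fmt.Print(buf.String())
-//	// e.g. "# TYPE http_requests_total counter\nhttp_requests_total 42\n"
-//
-// Note that *bytes.Buffer already implements enhancedWriter, so the interface
-// upgrade above succeeds and no pooled bufio.Writer is needed.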
-
-// writeSample writes a single sample in text format to w, given the metric
-// name, the metric proto message itself, optionally an additional label name
-// with a float64 value (use empty string as label name if not required), and
-// the value. The function returns the number of bytes written and any error
-// encountered.
-func writeSample(
- w enhancedWriter,
- name, suffix string,
- metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- value float64,
-) (int, error) {
- written := 0
- n, err := writeNameAndLabelPairs(
- w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeFloat(w, value)
- written += n
- if err != nil {
- return written, err
- }
- if metric.TimestampMs != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeInt(w, *metric.TimestampMs)
- written += n
- if err != nil {
- return written, err
- }
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
-// explicitly given metric name and additional label pair into text formatted as
-// required by the text format and writes it to 'w'. An empty slice in
-// combination with an empty string 'additionalLabelName' results in nothing
-// being written. Otherwise, the label pairs are written, escaped as required by
-// the text format, and enclosed in '{...}'. The function returns the number of
-// bytes written and any error encountered. If the metric name is not
-// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
-// label names will also be quoted.
-func writeNameAndLabelPairs(
- w enhancedWriter,
- name string,
- in []*dto.LabelPair,
- additionalLabelName string, additionalLabelValue float64,
-) (int, error) {
- var (
- written int
- separator byte = '{'
- metricInsideBraces = false
- )
-
- if name != "" {
- // If the name does not pass the legacy validity check, we must put the
- // metric name inside the braces.
- if !model.IsValidLegacyMetricName(name) {
- metricInsideBraces = true
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- n, err := writeName(w, name)
- written += n
- if err != nil {
- return written, err
- }
- }
-
- if len(in) == 0 && additionalLabelName == "" {
- if metricInsideBraces {
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- }
- return written, nil
- }
-
- for _, lp := range in {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := writeName(w, lp.GetName())
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeEscapedString(w, lp.GetValue(), true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- if additionalLabelName != "" {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(additionalLabelName)
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeFloat(w, additionalLabelValue)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- }
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
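-
-// For example, with a single (assumed) label pair code="200":
-//
-//	http_requests_total{code="200"}     // legacy-valid name, outside the braces
-//	{"http.requests.total",code="200"}  // legacy-invalid name, quoted inside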
-
-// writeEscapedString replaces '\' by '\\' and the newline character by '\n'.
-// If includeDoubleQuote is true, it also replaces '"' by '\"'.
-var (
- escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
- quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
-)
-
-func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
- if includeDoubleQuote {
- return quotedEscaper.WriteString(w, v)
- }
- return escaper.WriteString(w, v)
-}
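-
-// For example:
-//
-//	writeEscapedString(w, `back\slash`, false) // writes: back\\slash
-//	writeEscapedString(w, "two\nlines", false) // writes: two\nlines
-//	writeEscapedString(w, `say "hi"`, true)    // writes: say \"hi\"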
-
-// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
-// a few common cases for increased efficiency. For non-hardcoded cases, it uses
-// strconv.AppendFloat to avoid allocations, similar to writeInt.
-func writeFloat(w enhancedWriter, f float64) (int, error) {
- switch {
- case f == 1:
- return 1, w.WriteByte('1')
- case f == 0:
- return 1, w.WriteByte('0')
- case f == -1:
- return w.WriteString("-1")
- case math.IsNaN(f):
- return w.WriteString("NaN")
- case math.IsInf(f, +1):
- return w.WriteString("+Inf")
- case math.IsInf(f, -1):
- return w.WriteString("-Inf")
- default:
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
- }
-}
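-
-// In contrast to writeOpenMetricsFloat above, writeFloat never forces a
-// decimal point: writeFloat(w, 1) writes "1", not "1.0".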
-
-// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
-// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
-// allocations.
-func writeInt(w enhancedWriter, i int64) (int, error) {
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendInt((*bp)[:0], i, 10)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
-}
-
-// writeName writes a string as-is if it complies with the legacy naming
-// scheme, or escapes it in double quotes if not.
-func writeName(w enhancedWriter, name string) (int, error) {
- if model.IsValidLegacyMetricName(name) {
- return w.WriteString(name)
- }
- var written int
- var err error
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- var n int
- n, err = writeEscapedString(w, name, true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- return written, err
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
deleted file mode 100644
index b4607fe4d..000000000
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ /dev/null
@@ -1,901 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
- "unicode/utf8"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
-
- "github.com/prometheus/common/model"
-)
-
-// A stateFn is a function that represents a state in a state machine. By
-// executing it, the state is progressed to the next state. The stateFn returns
-// another stateFn, which represents the new state. The end state is represented
-// by nil.
-type stateFn func() stateFn
-
-// ParseError signals errors while parsing the simple and flat text-based
-// exchange format.
-type ParseError struct {
- Line int
- Msg string
-}
-
-// Error implements the error interface.
-func (e ParseError) Error() string {
- return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
-}
-
-// TextParser is used to parse the simple and flat text-based exchange format. Its
-// zero value is ready to use.
-type TextParser struct {
- metricFamiliesByName map[string]*dto.MetricFamily
- buf *bufio.Reader // Where the parsed input is read through.
- err error // Most recent error.
- lineCount int // Tracks the line count for error messages.
- currentByte byte // The most recent byte read.
- currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
- currentMF *dto.MetricFamily
- currentMetric *dto.Metric
- currentLabelPair *dto.LabelPair
- currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
-
- // The remaining member variables are only used for summaries/histograms.
- currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
- // Summary specific.
- summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentQuantile float64
- // Histogram specific.
- histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentBucket float64
- // These tell us if the currently processed line ends in '_count' or
- // '_sum' respectively and belongs to a summary/histogram, representing the
- // sample count and sum of that summary/histogram.
- currentIsSummaryCount, currentIsSummarySum bool
- currentIsHistogramCount, currentIsHistogramSum bool
- // These indicate whether the metric name on the line currently being parsed
- // is inside braces, and whether that metric name has been found, respectively.
- currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
-}
-
-// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
-// format and creates MetricFamily proto messages. It returns the MetricFamily
-// proto messages in a map where the metric names are the keys, along with any
-// error encountered.
-//
-// If the input contains duplicate metrics (i.e. lines with the same metric name
-// and exactly the same label set), the resulting MetricFamily will contain
-// duplicate Metric proto messages. The same is true for duplicate label
-// names. Checks for duplicates have to be performed separately, if required.
-// Also note that neither the metrics within each MetricFamily are sorted nor
-// the label pairs within each Metric. Sorting is not required for the most
-// frequent use of this method, which is sample ingestion in the Prometheus
-// server. However, for presentation purposes, you might want to sort the
-// metrics, and in some cases, you must sort the labels, e.g. for consumption by
-// the metric family injection hook of the Prometheus registry.
-//
-// Summaries and histograms are rather special beasts. You would probably not
-// use them in the simple text format anyway. This method can deal with
-// summaries and histograms if they are presented in exactly the way the
-// text.Create function creates them.
-//
-// This method must not be called concurrently. If you want to parse different
-// input concurrently, instantiate a separate Parser for each goroutine.
-func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
- p.reset(in)
- for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
- // Magic happens here...
- }
- // Get rid of empty metric families.
- for k, mf := range p.metricFamiliesByName {
- if len(mf.GetMetric()) == 0 {
- delete(p.metricFamiliesByName, k)
- }
- }
- // If p.err is io.EOF now, we have run into a premature end of the input
- // stream. Turn this error into something nicer and more
- // meaningful. (io.EOF is often used as a signal for the legitimate end
- // of an input stream.)
- if p.err != nil && errors.Is(p.err, io.EOF) {
- p.parseError("unexpected end of input stream")
- }
- return p.metricFamiliesByName, p.err
-}
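-
-// A minimal usage sketch (the input literal is an assumed example):
-//
-//	var parser TextParser
-//	mfs, err := parser.TextToMetricFamilies(strings.NewReader(
-//		"# TYPE requests_total counter\nrequests_total{path=\"/\"} 42\n"))
-//	if err != nil {
-//		// handle err
-//	}
-//	fmt.Println(mfs["requests_total"].GetType()) // COUNTER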
-
-func (p *TextParser) reset(in io.Reader) {
- p.metricFamiliesByName = map[string]*dto.MetricFamily{}
- if p.buf == nil {
- p.buf = bufio.NewReader(in)
- } else {
- p.buf.Reset(in)
- }
- p.err = nil
- p.lineCount = 0
- if p.summaries == nil || len(p.summaries) > 0 {
- p.summaries = map[uint64]*dto.Metric{}
- }
- if p.histograms == nil || len(p.histograms) > 0 {
- p.histograms = map[uint64]*dto.Metric{}
- }
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
- p.currentMF = nil
-}
-
-// startOfLine represents the state where the next byte read from p.buf is the
-// start of a line (or whitespace leading up to it).
-func (p *TextParser) startOfLine() stateFn {
- p.lineCount++
- p.currentMetricIsInsideBraces = false
- p.currentMetricInsideBracesIsPresent = false
- if p.skipBlankTab(); p.err != nil {
- // This is the only place that we expect to see io.EOF,
- // which is not an error but the signal that we are done.
- // Any other error that happens to align with the start of
- // a line is still an error.
- if errors.Is(p.err, io.EOF) {
- p.err = nil
- }
- return nil
- }
- switch p.currentByte {
- case '#':
- return p.startComment
- case '\n':
- return p.startOfLine // Empty line, start the next one.
- case '{':
- p.currentMetricIsInsideBraces = true
- return p.readingLabels
- }
- return p.readingMetricName
-}
-
-// startComment represents the state where the next byte read from p.buf is the
-// start of a comment (or whitespace leading up to it).
-func (p *TextParser) startComment() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- // If we have hit the end of line already, there is nothing left
- // to do. This is not considered a syntax error.
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- keyword := p.currentToken.String()
- if keyword != "HELP" && keyword != "TYPE" {
- // Generic comment, ignore by fast forwarding to end of line.
- for p.currentByte != '\n' {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return nil // Unexpected end of input.
- }
- }
- return p.startOfLine
- }
- // There is something. Next has to be a metric name.
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenAsMetricName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- if !isBlankOrTab(p.currentByte) {
- p.parseError("invalid metric name in comment")
- return nil
- }
- p.setOrCreateCurrentMF()
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- switch keyword {
- case "HELP":
- return p.readingHelp
- case "TYPE":
- return p.readingType
- }
- panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
-}
-
-// readingMetricName represents the state where the last byte read (now in
-// p.currentByte) is the first byte of a metric name.
-func (p *TextParser) readingMetricName() stateFn {
- if p.readTokenAsMetricName(); p.err != nil {
- return nil
- }
- if p.currentToken.Len() == 0 {
- p.parseError("invalid metric name")
- return nil
- }
- p.setOrCreateCurrentMF()
- // Now is the time to fix the type if it hasn't happened yet.
- if p.currentMF.Type == nil {
- p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
- }
- p.currentMetric = &dto.Metric{}
- // Do not append the newly created currentMetric to
- // currentMF.Metric right now. First wait if this is a summary,
- // and the metric exists already, which we can only know after
- // having read all the labels.
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingLabels
-}
-
-// readingLabels represents the state where the last byte read (now in
-// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
-// first byte of the value (otherwise).
-func (p *TextParser) readingLabels() stateFn {
- // Summaries/histograms are special. We have to reset the
- // currentLabels map, currentQuantile and currentBucket before starting to
- // read labels.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- p.currentLabels = map[string]string{}
- p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
- }
- if p.currentByte != '{' {
- return p.readingValue
- }
- return p.startLabelName
-}
-
-// startLabelName represents the state where the next byte read from p.buf is
-// the start of a label name (or whitespace leading up to it).
-func (p *TextParser) startLabelName() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '}' {
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
- p.currentLabelPairs = nil
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- }
- if p.readTokenAsLabelName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() == 0 {
- p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
- return nil
- }
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '=' {
- if p.currentMetricIsInsideBraces {
- if p.currentMetricInsideBracesIsPresent {
- p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
- return nil
- }
- switch p.currentByte {
- case ',':
- p.setOrCreateCurrentMF()
- if p.currentMF.Type == nil {
- p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
- }
- p.currentMetric = &dto.Metric{}
- p.currentMetricInsideBracesIsPresent = true
- return p.startLabelName
- case '}':
- p.setOrCreateCurrentMF()
- if p.currentMF.Type == nil {
- p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
- }
- p.currentMetric = &dto.Metric{}
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
- p.currentLabelPairs = nil
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- default:
- p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
- return nil
- }
- }
- p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
- p.currentLabelPairs = nil
- return nil
- }
- p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
- if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
- p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
- return nil
- }
- // Special summary/histogram treatment. Don't add 'quantile' and 'le'
- // labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
- p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
- }
- // Check for duplicate label names.
- labels := make(map[string]struct{})
- for _, l := range p.currentLabelPairs {
- lName := l.GetName()
- if _, exists := labels[lName]; !exists {
- labels[lName] = struct{}{}
- } else {
- p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
- p.currentLabelPairs = nil
- return nil
- }
- }
- return p.startLabelValue
-}
-
-// startLabelValue represents the state where the next byte read from p.buf is
-// the start of a (quoted) label value (or whitespace leading up to it).
-func (p *TextParser) startLabelValue() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '"' {
- p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
- return nil
- }
- if p.readTokenAsLabelValue(); p.err != nil {
- return nil
- }
- if !model.LabelValue(p.currentToken.String()).IsValid() {
- p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
- return nil
- }
- p.currentLabelPair.Value = proto.String(p.currentToken.String())
- // Special treatment of summaries:
- // - Quantile labels are special, will result in dto.Quantile later.
- // - Other labels have to be added to currentLabels for signature calculation.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if p.currentLabelPair.GetName() == model.QuantileLabel {
- if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
- p.currentLabelPairs = nil
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- // Similar special treatment of histograms.
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if p.currentLabelPair.GetName() == model.BucketLabel {
- if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- switch p.currentByte {
- case ',':
- return p.startLabelName
-
- case '}':
- if p.currentMF == nil {
- p.parseError("invalid metric name")
- return nil
- }
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
- p.currentLabelPairs = nil
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- default:
- p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
- p.currentLabelPairs = nil
- return nil
- }
-}
-
-// readingValue represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the sample value (i.e. a float).
-func (p *TextParser) readingValue() stateFn {
- // When we are here, we have read all the labels, so for the
- // special case of a summary/histogram, we can finally find out
- // if the metric already exists.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- signature := model.LabelsToSignature(p.currentLabels)
- if summary := p.summaries[signature]; summary != nil {
- p.currentMetric = summary
- } else {
- p.summaries[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- signature := model.LabelsToSignature(p.currentLabels)
- if histogram := p.histograms[signature]; histogram != nil {
- p.currentMetric = histogram
- } else {
- p.histograms[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else {
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- value, err := parseFloat(p.currentToken.String())
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
- return nil
- }
- switch p.currentMF.GetType() {
- case dto.MetricType_COUNTER:
- p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
- case dto.MetricType_GAUGE:
- p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
- case dto.MetricType_UNTYPED:
- p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
- case dto.MetricType_SUMMARY:
- // *sigh*
- if p.currentMetric.Summary == nil {
- p.currentMetric.Summary = &dto.Summary{}
- }
- switch {
- case p.currentIsSummaryCount:
- p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsSummarySum:
- p.currentMetric.Summary.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentQuantile):
- p.currentMetric.Summary.Quantile = append(
- p.currentMetric.Summary.Quantile,
- &dto.Quantile{
- Quantile: proto.Float64(p.currentQuantile),
- Value: proto.Float64(value),
- },
- )
- }
- case dto.MetricType_HISTOGRAM:
- // *sigh*
- if p.currentMetric.Histogram == nil {
- p.currentMetric.Histogram = &dto.Histogram{}
- }
- switch {
- case p.currentIsHistogramCount:
- p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsHistogramSum:
- p.currentMetric.Histogram.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentBucket):
- p.currentMetric.Histogram.Bucket = append(
- p.currentMetric.Histogram.Bucket,
- &dto.Bucket{
- UpperBound: proto.Float64(p.currentBucket),
- CumulativeCount: proto.Uint64(uint64(value)),
- },
- )
- }
- default:
- p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- return p.startTimestamp
-}
-
-// startTimestamp represents the state where the next byte read from p.buf is
-// the start of the timestamp (or whitespace leading up to it).
-func (p *TextParser) startTimestamp() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
- return nil
- }
- p.currentMetric.TimestampMs = proto.Int64(timestamp)
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() > 0 {
- p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
- return nil
- }
- return p.startOfLine
-}
-
-// readingHelp represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the docstring after 'HELP'.
-func (p *TextParser) readingHelp() stateFn {
- if p.currentMF.Help != nil {
- p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the docstring.
- if p.readTokenUntilNewline(true); p.err != nil {
- return nil // Unexpected end of input.
- }
- p.currentMF.Help = proto.String(p.currentToken.String())
- return p.startOfLine
-}
-
-// readingType represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the type hint after 'TYPE'.
-func (p *TextParser) readingType() stateFn {
- if p.currentMF.Type != nil {
- p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the type.
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
- if !ok {
- p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
- return nil
- }
- p.currentMF.Type = dto.MetricType(metricType).Enum()
- return p.startOfLine
-}
-
-// parseError sets p.err to a ParseError at the current line with the given
-// message.
-func (p *TextParser) parseError(msg string) {
- p.err = ParseError{
- Line: p.lineCount,
- Msg: msg,
- }
-}
-
-// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
-// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
-func (p *TextParser) skipBlankTab() {
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
- return
- }
- }
-}
-
-// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
-// anything if p.currentByte is neither ' ' nor '\t'.
-func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
- if isBlankOrTab(p.currentByte) {
- p.skipBlankTab()
- }
-}
-
-// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
-// first byte considered is the byte already read (now in p.currentByte). The
-// first whitespace byte encountered is still copied into p.currentByte, but not
-// into p.currentToken.
-func (p *TextParser) readTokenUntilWhitespace() {
- p.currentToken.Reset()
- for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
-// byte considered is the byte already read (now in p.currentByte). The first
-// newline byte encountered is still copied into p.currentByte, but not into
-// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' translates into '\', and '\n' into a line-feed character.
-// All other escape sequences are invalid and cause an error.
-func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
- p.currentToken.Reset()
- escaped := false
- for p.err == nil {
- if recognizeEscapeSequence && escaped {
- switch p.currentByte {
- case '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- case '"':
- p.currentToken.WriteByte('"')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- } else {
- switch p.currentByte {
- case '\n':
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a metric name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsMetricName() {
- p.currentToken.Reset()
- // A UTF-8 metric name must be quoted and may have escaped characters.
- quoted := false
- escaped := false
- if !isValidMetricNameStart(p.currentByte) {
- return
- }
- for p.err == nil {
- if escaped {
- switch p.currentByte {
- case '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- case '"':
- p.currentToken.WriteByte('"')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- } else {
- switch p.currentByte {
- case '"':
- quoted = !quoted
- if !quoted {
- p.currentByte, p.err = p.buf.ReadByte()
- return
- }
- case '\n':
- p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
- p.currentByte, p.err = p.buf.ReadByte()
- if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
- return
- }
- }
-}
-
-// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a label name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelName() {
- p.currentToken.Reset()
- // A UTF-8 label name must be quoted and may have escaped characters.
- quoted := false
- escaped := false
- if !isValidLabelNameStart(p.currentByte) {
- return
- }
- for p.err == nil {
- if escaped {
- switch p.currentByte {
- case '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- case '"':
- p.currentToken.WriteByte('"')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- } else {
- switch p.currentByte {
- case '"':
- quoted = !quoted
- if !quoted {
- p.currentByte, p.err = p.buf.ReadByte()
- return
- }
- case '\n':
- p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
- p.currentByte, p.err = p.buf.ReadByte()
- if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
- return
- }
- }
-}
-
-// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
-// In contrast to the other 'readTokenAs...' functions, which start with the
-// last read byte in p.currentByte, this method ignores p.currentByte and starts
-// with reading a new byte from p.buf. The first byte not part of a label value
-// is still copied into p.currentByte, but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelValue() {
- p.currentToken.Reset()
- escaped := false
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return
- }
- if escaped {
- switch p.currentByte {
- case '"', '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- p.currentLabelPairs = nil
- return
- }
- escaped = false
- continue
- }
- switch p.currentByte {
- case '"':
- return
- case '\n':
- p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
-}
-
-func (p *TextParser) setOrCreateCurrentMF() {
- p.currentIsSummaryCount = false
- p.currentIsSummarySum = false
- p.currentIsHistogramCount = false
- p.currentIsHistogramSum = false
- name := p.currentToken.String()
- if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
- return
- }
- // Check whether this is a _sum or _count for a summary/histogram.
- summaryName := summaryMetricName(name)
- if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if isCount(name) {
- p.currentIsSummaryCount = true
- }
- if isSum(name) {
- p.currentIsSummarySum = true
- }
- return
- }
- }
- histogramName := histogramMetricName(name)
- if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if isCount(name) {
- p.currentIsHistogramCount = true
- }
- if isSum(name) {
- p.currentIsHistogramSum = true
- }
- return
- }
- }
- p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
- p.metricFamiliesByName[name] = p.currentMF
-}
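-
-// For example, if a summary family "rpc_duration" already exists, a subsequent
-// "rpc_duration_sum" line resolves to that family with currentIsSummarySum
-// set; an unknown name instead creates a fresh MetricFamily (its type is set
-// later).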
-
-func isValidLabelNameStart(b byte) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
-}
-
-func isValidLabelNameContinuation(b byte, quoted bool) bool {
- return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
-}
-
-func isValidMetricNameStart(b byte) bool {
- return isValidLabelNameStart(b) || b == ':'
-}
-
-func isValidMetricNameContinuation(b byte, quoted bool) bool {
- return isValidLabelNameContinuation(b, quoted) || b == ':'
-}
-
-func isBlankOrTab(b byte) bool {
- return b == ' ' || b == '\t'
-}
-
-func isCount(name string) bool {
- return len(name) > 6 && name[len(name)-6:] == "_count"
-}
-
-func isSum(name string) bool {
- return len(name) > 4 && name[len(name)-4:] == "_sum"
-}
-
-func isBucket(name string) bool {
- return len(name) > 7 && name[len(name)-7:] == "_bucket"
-}
-
-func summaryMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- default:
- return name
- }
-}
-
-func histogramMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- case isBucket(name):
- return name[:len(name)-7]
- default:
- return name
- }
-}
-
-func parseFloat(s string) (float64, error) {
- if strings.ContainsAny(s, "pP_") {
- return 0, errors.New("unsupported character in float")
- }
- return strconv.ParseFloat(s, 64)
-}
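-
-// For example:
-//
-//	parseFloat("3.14")   // 3.14, nil
-//	parseFloat("+Inf")   // +Inf, nil (strconv.ParseFloat accepts Inf and NaN)
-//	parseFloat("0x1p-3") // error: 'p' (hex float exponent) is rejected
-//	parseFloat("1_000")  // error: '_' digit separators are rejected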
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
deleted file mode 100644
index bd3a39e3e..000000000
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "errors"
- "fmt"
- "time"
-)
-
-type AlertStatus string
-
-const (
- AlertFiring AlertStatus = "firing"
- AlertResolved AlertStatus = "resolved"
-)
-
-// Alert is a generic representation of an alert in the Prometheus eco-system.
-type Alert struct {
- // Label value pairs for purposes of aggregation, matching, and disposition
- // dispatching. This must minimally include an "alertname" label.
- Labels LabelSet `json:"labels"`
-
- // Extra key/value information which does not define alert identity.
- Annotations LabelSet `json:"annotations"`
-
- // The known time range for this alert. Both ends are optional.
- StartsAt time.Time `json:"startsAt,omitempty"`
- EndsAt time.Time `json:"endsAt,omitempty"`
- GeneratorURL string `json:"generatorURL"`
-}
-
-// Name returns the name of the alert. It is equivalent to the "alertname" label.
-func (a *Alert) Name() string {
- return string(a.Labels[AlertNameLabel])
-}
-
-// Fingerprint returns a unique hash for the alert. It is equivalent to
-// the fingerprint of the alert's label set.
-func (a *Alert) Fingerprint() Fingerprint {
- return a.Labels.Fingerprint()
-}
-
-func (a *Alert) String() string {
- s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
- if a.Resolved() {
- return s + "[resolved]"
- }
- return s + "[active]"
-}
-
-// Resolved returns true iff the activity interval ended in the past.
-func (a *Alert) Resolved() bool {
- return a.ResolvedAt(time.Now())
-}
-
-// ResolvedAt returns true iff the activity interval ended before
-// the given timestamp.
-func (a *Alert) ResolvedAt(ts time.Time) bool {
- if a.EndsAt.IsZero() {
- return false
- }
- return !a.EndsAt.After(ts)
-}
-
-// Status returns the status of the alert.
-func (a *Alert) Status() AlertStatus {
- return a.StatusAt(time.Now())
-}
-
-// StatusAt returns the status of the alert at the given timestamp.
-func (a *Alert) StatusAt(ts time.Time) AlertStatus {
- if a.ResolvedAt(ts) {
- return AlertResolved
- }
- return AlertFiring
-}
-
-// Validate checks whether the alert data is inconsistent.
-func (a *Alert) Validate() error {
- if a.StartsAt.IsZero() {
- return errors.New("start time missing")
- }
- if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return errors.New("start time must be before end time")
- }
- if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %w", err)
- }
- if len(a.Labels) == 0 {
- return errors.New("at least one label pair required")
- }
- if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %w", err)
- }
- return nil
-}
-
-// Alerts is a list of alerts that can be sorted in chronological order.
-type Alerts []*Alert
-
-func (as Alerts) Len() int { return len(as) }
-func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
-
-func (as Alerts) Less(i, j int) bool {
- if as[i].StartsAt.Before(as[j].StartsAt) {
- return true
- }
- if as[i].EndsAt.Before(as[j].EndsAt) {
- return true
- }
- return as[i].Fingerprint() < as[j].Fingerprint()
-}
-
-// HasFiring returns true iff one of the alerts is not resolved.
-func (as Alerts) HasFiring() bool {
- for _, a := range as {
- if !a.Resolved() {
- return true
- }
- }
- return false
-}
-
-// HasFiringAt returns true iff one of the alerts is not resolved
-// at the time ts.
-func (as Alerts) HasFiringAt(ts time.Time) bool {
- for _, a := range as {
- if !a.ResolvedAt(ts) {
- return true
- }
- }
- return false
-}
-
-// Status returns StatusFiring iff at least one of the alerts is firing.
-func (as Alerts) Status() AlertStatus {
- if as.HasFiring() {
- return AlertFiring
- }
- return AlertResolved
-}
-
-// StatusAt returns StatusFiring iff at least one of the alerts is firing
-// at the time ts.
-func (as Alerts) StatusAt(ts time.Time) AlertStatus {
- if as.HasFiringAt(ts) {
- return AlertFiring
- }
- return AlertResolved
-}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
deleted file mode 100644
index fc4de4106..000000000
--- a/vendor/github.com/prometheus/common/model/fingerprinting.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "strconv"
-)
-
-// Fingerprint provides a hash-capable representation of a Metric.
-// For our purposes, FNV-1A 64-bit is used.
-type Fingerprint uint64
-
-// FingerprintFromString transforms a string representation into a Fingerprint.
-func FingerprintFromString(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- return Fingerprint(num), err
-}
-
-// ParseFingerprint parses the input string into a fingerprint.
-func ParseFingerprint(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return 0, err
- }
- return Fingerprint(num), nil
-}
-
-func (f Fingerprint) String() string {
- return fmt.Sprintf("%016x", uint64(f))
-}
-
-// Fingerprints represents a collection of Fingerprint subject to a given
-// natural sorting scheme. It implements sort.Interface.
-type Fingerprints []Fingerprint
-
-// Len implements sort.Interface.
-func (f Fingerprints) Len() int {
- return len(f)
-}
-
-// Less implements sort.Interface.
-func (f Fingerprints) Less(i, j int) bool {
- return f[i] < f[j]
-}
-
-// Swap implements sort.Interface.
-func (f Fingerprints) Swap(i, j int) {
- f[i], f[j] = f[j], f[i]
-}
-
-// FingerprintSet is a set of Fingerprints.
-type FingerprintSet map[Fingerprint]struct{}
-
-// Equal returns true if both sets contain the same elements (and not more).
-func (s FingerprintSet) Equal(o FingerprintSet) bool {
- if len(s) != len(o) {
- return false
- }
-
- for k := range s {
- if _, ok := o[k]; !ok {
- return false
- }
- }
-
- return true
-}
-
-// Intersection returns the elements contained in both sets.
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
- myLength, otherLength := len(s), len(o)
- if myLength == 0 || otherLength == 0 {
- return FingerprintSet{}
- }
-
- subSet := s
- superSet := o
-
- if otherLength < myLength {
- subSet = o
- superSet = s
- }
-
- out := FingerprintSet{}
-
- for k := range subSet {
- if _, ok := superSet[k]; ok {
- out[k] = struct{}{}
- }
- }
-
- return out
-}
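-
-// For example:
-//
-//	a := FingerprintSet{1: {}, 2: {}}
-//	b := FingerprintSet{2: {}, 3: {}}
-//	a.Intersection(b) // FingerprintSet{2: {}}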
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
deleted file mode 100644
index 367afecd3..000000000
--- a/vendor/github.com/prometheus/common/model/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
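-
-// A typical use (sketch) is hashing a label name/value pair with a separator
-// byte in between, as done for signatures elsewhere in this package:
-//
-//	h := hashNew()
-//	h = hashAdd(h, "alertname")
-//	h = hashAddByte(h, 255) // SeparatorByte
-//	h = hashAdd(h, "HighLatency")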
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
deleted file mode 100644
index 73b7aa3e6..000000000
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-const (
- // AlertNameLabel is the name of the label containing an alert's name.
- AlertNameLabel = "alertname"
-
- // ExportedLabelPrefix is the prefix to prepend to the label names present in
- // exported metrics if a label of the same name is added by the server.
- ExportedLabelPrefix = "exported_"
-
- // MetricNameLabel is the label name indicating the metric name of a
- // timeseries.
- MetricNameLabel = "__name__"
-
- // SchemeLabel is the name of the label that holds the scheme on which to
- // scrape a target.
- SchemeLabel = "__scheme__"
-
- // AddressLabel is the name of the label that holds the address of
- // a scrape target.
- AddressLabel = "__address__"
-
- // MetricsPathLabel is the name of the label that holds the path on which to
- // scrape a target.
- MetricsPathLabel = "__metrics_path__"
-
- // ScrapeIntervalLabel is the name of the label that holds the scrape interval
- // used to scrape a target.
- ScrapeIntervalLabel = "__scrape_interval__"
-
- // ScrapeTimeoutLabel is the name of the label that holds the scrape
- // timeout used to scrape a target.
- ScrapeTimeoutLabel = "__scrape_timeout__"
-
- // ReservedLabelPrefix is a prefix which is not legal in user-supplied
- // label names.
- ReservedLabelPrefix = "__"
-
- // MetaLabelPrefix is a prefix for labels that provide meta information.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series.
- MetaLabelPrefix = "__meta_"
-
- // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series. This is reserved for use in
- // Prometheus configuration files by users.
- TmpLabelPrefix = "__tmp_"
-
- // ParamLabelPrefix is a prefix for labels that provide URL parameters
- // used to scrape a target.
- ParamLabelPrefix = "__param_"
-
- // JobLabel is the label name indicating the job from which a timeseries
- // was scraped.
- JobLabel = "job"
-
- // InstanceLabel is the label name used for the instance label.
- InstanceLabel = "instance"
-
- // BucketLabel is used for the label that defines the upper bound of a
- // bucket of a histogram ("le" -> "less or equal").
- BucketLabel = "le"
-
- // QuantileLabel is used for the label that defines the quantile in a
- // summary.
- QuantileLabel = "quantile"
-)
-
-// LabelNameRE is a regular expression matching valid label names. Note that the
-// IsValid method of LabelName performs the same check but faster than a match
-// with this regular expression.
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// IsValid returns true iff the name matches the pattern of LabelNameRE when
-// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
-// NameValidationScheme is set to UTF8Validation.
-func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- switch NameValidationScheme {
- case LegacyValidation:
- return ln.IsValidLegacy()
- case UTF8Validation:
- return utf8.ValidString(string(ln))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
-}
-
-// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
-// legacy names. It does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
-func (ln LabelName) IsValidLegacy() bool {
- if len(ln) == 0 {
- return false
- }
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (ln *LabelName) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
- // LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName
-
-func (l LabelNames) Len() int {
- return len(l)
-}
-
-func (l LabelNames) Less(i, j int) bool {
- return l[i] < l[j]
-}
-
-func (l LabelNames) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-func (l LabelNames) String() string {
- labelStrings := make([]string, 0, len(l))
- for _, label := range l {
- labelStrings = append(labelStrings, string(label))
- }
- return strings.Join(labelStrings, ", ")
-}
-
-// A LabelValue is an associated value for a LabelName.
-type LabelValue string
-
- // IsValid returns true iff the string is valid UTF-8.
-func (lv LabelValue) IsValid() bool {
- return utf8.ValidString(string(lv))
-}
-
-// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
-type LabelValues []LabelValue
-
-func (l LabelValues) Len() int {
- return len(l)
-}
-
-func (l LabelValues) Less(i, j int) bool {
- return string(l[i]) < string(l[j])
-}
-
-func (l LabelValues) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-// LabelPair pairs a name with a value.
-type LabelPair struct {
- Name LabelName
- Value LabelValue
-}
-
-// LabelPairs is a sortable slice of LabelPair pointers. It implements
-// sort.Interface.
-type LabelPairs []*LabelPair
-
-func (l LabelPairs) Len() int {
- return len(l)
-}
-
-func (l LabelPairs) Less(i, j int) bool {
- switch {
- case l[i].Name > l[j].Name:
- return false
- case l[i].Name < l[j].Name:
- return true
- case l[i].Value > l[j].Value:
- return false
- case l[i].Value < l[j].Value:
- return true
- default:
- return false
- }
-}
-
-func (l LabelPairs) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
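
A minimal usage sketch for the name validation and sorting APIs above; the expected outputs assume the package default of UTF8Validation:

package main

import (
    "fmt"
    "sort"

    "github.com/prometheus/common/model"
)

func main() {
    names := model.LabelNames{"zone", "job", "instance"}
    sort.Sort(names)   // LabelNames implements sort.Interface
    fmt.Println(names) // instance, job, zone

    // Under UTF8Validation any non-empty valid UTF-8 string passes; the
    // legacy check additionally forbids a leading digit and most symbols.
    fmt.Println(model.LabelName("0bad").IsValid())       // true
    fmt.Println(model.LabelName("0bad").IsValidLegacy()) // false
}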
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
deleted file mode 100644
index d0ad88da3..000000000
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "sort"
-)
-
-// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
-// may be fully-qualified down to the point where it may resolve to a single
-// Metric in the data store or not. All operations that occur within the realm
-// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
-// match.
-type LabelSet map[LabelName]LabelValue
-
-// Validate checks whether all names and values in the label set
-// are valid.
-func (ls LabelSet) Validate() error {
- for ln, lv := range ls {
- if !ln.IsValid() {
- return fmt.Errorf("invalid name %q", ln)
- }
- if !lv.IsValid() {
- return fmt.Errorf("invalid value %q", lv)
- }
- }
- return nil
-}
-
-// Equal returns true iff both label sets have exactly the same key/value pairs.
-func (ls LabelSet) Equal(o LabelSet) bool {
- if len(ls) != len(o) {
- return false
- }
- for ln, lv := range ls {
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if olv != lv {
- return false
- }
- }
- return true
-}
-
- // Before compares the label sets, using the following criteria:
- //
- // If ls has fewer labels than o, it is before o. If it has more, it is not.
- //
- // If the number of labels is the same, the superset of all label names is
- // sorted alphanumerically. The first differing label pair found in that order
- // determines the outcome: If the label does not exist at all in ls, then ls
- // is before o, and vice versa. Otherwise the label value is compared
- // alphanumerically.
- //
- // If ls and o are equal, the method returns false.
-func (ls LabelSet) Before(o LabelSet) bool {
- if len(ls) < len(o) {
- return true
- }
- if len(ls) > len(o) {
- return false
- }
-
- lns := make(LabelNames, 0, len(ls)+len(o))
- for ln := range ls {
- lns = append(lns, ln)
- }
- for ln := range o {
- lns = append(lns, ln)
- }
- // It's probably not worth it to de-dup lns.
- sort.Sort(lns)
- for _, ln := range lns {
- mlv, ok := ls[ln]
- if !ok {
- return true
- }
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if mlv < olv {
- return true
- }
- if mlv > olv {
- return false
- }
- }
- return false
-}
-
-// Clone returns a copy of the label set.
-func (ls LabelSet) Clone() LabelSet {
- lsn := make(LabelSet, len(ls))
- for ln, lv := range ls {
- lsn[ln] = lv
- }
- return lsn
-}
-
-// Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
- result := make(LabelSet, len(l))
-
- for k, v := range l {
- result[k] = v
- }
-
- for k, v := range other {
- result[k] = v
- }
-
- return result
-}
-
-// Fingerprint returns the LabelSet's fingerprint.
-func (ls LabelSet) Fingerprint() Fingerprint {
- return labelSetToFingerprint(ls)
-}
-
-// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (ls LabelSet) FastFingerprint() Fingerprint {
- return labelSetToFastFingerprint(ls)
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
- var m map[LabelName]LabelValue
- if err := json.Unmarshal(b, &m); err != nil {
- return err
- }
- // encoding/json only unmarshals maps of the form map[string]T. It treats
- // LabelName as a string and does not call its UnmarshalJSON method.
- // Thus, we have to replicate the behavior here.
- for ln := range m {
- if !ln.IsValid() {
- return fmt.Errorf("%q is not a valid label name", ln)
- }
- }
- *l = LabelSet(m)
- return nil
-}
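
To illustrate the Merge/Equal/Before semantics defined above, a small sketch (label values chosen arbitrarily):

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    base := model.LabelSet{"job": "api", "instance": "a:9090"}
    merged := base.Merge(model.LabelSet{"instance": "b:9090"})

    fmt.Println(merged["instance"]) // b:9090 (the argument wins on conflict)
    fmt.Println(base.Equal(merged)) // false
    // Equal sizes, so Before walks the sorted name superset and compares
    // values; "a:9090" sorts before "b:9090".
    fmt.Println(base.Before(merged)) // true
}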
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
deleted file mode 100644
index abb2c9001..000000000
--- a/vendor/github.com/prometheus/common/model/labelset_string.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "bytes"
- "slices"
- "strconv"
-)
-
-// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
-func (l LabelSet) String() string {
- var lna [32]string // On stack to avoid memory allocation for sorting names.
- labelNames := lna[:0]
- for name := range l {
- labelNames = append(labelNames, string(name))
- }
- slices.Sort(labelNames)
- var bytea [1024]byte // On stack to avoid memory allocation while building the output.
- b := bytes.NewBuffer(bytea[:0])
- b.WriteByte('{')
- for i, name := range labelNames {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(name)
- b.WriteByte('=')
- b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
- }
- b.WriteByte('}')
- return b.String()
-}
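
The strconv.AppendQuote call above means values come out escaped, which matters for backslashes and embedded quotes; a quick sketch:

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    ls := model.LabelSet{"msg": `say "hi"`, "path": `C:\tmp`}
    fmt.Println(ls) // {msg="say \"hi\"", path="C:\\tmp"}
}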
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
deleted file mode 100644
index 447ab8ad6..000000000
--- a/vendor/github.com/prometheus/common/model/metadata.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// MetricType represents metric type values.
-type MetricType string
-
-const (
- MetricTypeCounter = MetricType("counter")
- MetricTypeGauge = MetricType("gauge")
- MetricTypeHistogram = MetricType("histogram")
- MetricTypeGaugeHistogram = MetricType("gaugehistogram")
- MetricTypeSummary = MetricType("summary")
- MetricTypeInfo = MetricType("info")
- MetricTypeStateset = MetricType("stateset")
- MetricTypeUnknown = MetricType("unknown")
-)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
deleted file mode 100644
index 5766107cf..000000000
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ /dev/null
@@ -1,453 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "errors"
- "fmt"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "unicode/utf8"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
-)
-
-var (
- // NameValidationScheme determines the method of name validation to be used by
- // all calls to IsValidMetricName() and LabelName.IsValid(). Setting UTF-8
- // mode in isolation from other components that don't support UTF-8 may result
- // in bugs or other undefined behavior. This value can be set to
- // LegacyValidation during startup if a binary is not UTF-8-aware. To avoid
- // the need for locking, this value should be set once, ideally in an
- // init(), before multiple goroutines are started.
- NameValidationScheme = UTF8Validation
-
- // NameEscapingScheme defines the default way that names will be escaped when
- // presented to systems that do not support UTF-8 names. If the Content-Type
- // "escaping" term is specified, that will override this value.
- // NameEscapingScheme should not be set to the NoEscaping value. That string
- // is used in content negotiation to indicate that a system supports UTF-8 and
- // has that feature enabled.
- NameEscapingScheme = UnderscoreEscaping
-)
-
-// ValidationScheme is a Go enum for determining how metric and label names will
-// be validated by this library.
-type ValidationScheme int
-
-const (
- // LegacyValidation is a setting that requires that metric and label names
- // conform to the original Prometheus character requirements described by
- // MetricNameRE and LabelNameRE.
- LegacyValidation ValidationScheme = iota
-
- // UTF8Validation only requires that metric and label names be valid UTF-8
- // strings.
- UTF8Validation
-)
-
-type EscapingScheme int
-
-const (
- // NoEscaping indicates that a name will not be escaped. Unescaped names that
- // do not conform to the legacy validity check will use a new exposition
- // format syntax that will be officially standardized in future versions.
- NoEscaping EscapingScheme = iota
-
- // UnderscoreEscaping replaces all legacy-invalid characters with underscores.
- UnderscoreEscaping
-
- // DotsEscaping is similar to UnderscoreEscaping, except that dots are
- // converted to `_dot_` and pre-existing underscores are converted to `__`.
- DotsEscaping
-
- // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
- // characters with the unicode value, surrounded by underscores. Single
- // underscores are replaced with double underscores.
- ValueEncodingEscaping
-)
-
-const (
- // EscapingKey is the key in an Accept or Content-Type header that defines how
- // metric and label names that do not conform to the legacy character
- // requirements should be escaped when being scraped by a legacy prometheus
- // system. If a system does not explicitly pass an escaping parameter in the
- // Accept header, the default NameEscapingScheme will be used.
- EscapingKey = "escaping"
-
- // Possible values for EscapingKey:
- AllowUTF8 = "allow-utf-8" // No escaping required.
- EscapeUnderscores = "underscores"
- EscapeDots = "dots"
- EscapeValues = "values"
-)
-
-// MetricNameRE is a regular expression matching valid metric
-// names. Note that the IsValidMetricName function performs the same
-// check but faster than a match with this regular expression.
-var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-
-// A Metric is similar to a LabelSet, but the key difference is that a Metric is
-// a singleton and refers to one and only one stream of samples.
-type Metric LabelSet
-
-// Equal compares the metrics.
-func (m Metric) Equal(o Metric) bool {
- return LabelSet(m).Equal(LabelSet(o))
-}
-
-// Before compares the metrics' underlying label sets.
-func (m Metric) Before(o Metric) bool {
- return LabelSet(m).Before(LabelSet(o))
-}
-
-// Clone returns a copy of the Metric.
-func (m Metric) Clone() Metric {
- clone := make(Metric, len(m))
- for k, v := range m {
- clone[k] = v
- }
- return clone
-}
-
-func (m Metric) String() string {
- metricName, hasName := m[MetricNameLabel]
- numLabels := len(m) - 1
- if !hasName {
- numLabels = len(m)
- }
- labelStrings := make([]string, 0, numLabels)
- for label, value := range m {
- if label != MetricNameLabel {
- labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
- }
- }
-
- switch numLabels {
- case 0:
- if hasName {
- return string(metricName)
- }
- return "{}"
- default:
- sort.Strings(labelStrings)
- return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
- }
-}
-
-// Fingerprint returns a Metric's Fingerprint.
-func (m Metric) Fingerprint() Fingerprint {
- return LabelSet(m).Fingerprint()
-}
-
-// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (m Metric) FastFingerprint() Fingerprint {
- return LabelSet(m).FastFingerprint()
-}
-
- // IsValidMetricName returns true iff name matches the pattern of MetricNameRE
- // when the legacy validation scheme is selected, or iff it is valid UTF-8 when
- // the UTF8Validation scheme is selected.
-func IsValidMetricName(n LabelValue) bool {
- switch NameValidationScheme {
- case LegacyValidation:
- return IsValidLegacyMetricName(string(n))
- case UTF8Validation:
- if len(n) == 0 {
- return false
- }
- return utf8.ValidString(string(n))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
-}
-
-// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
-// legacy validation scheme regardless of the value of NameValidationScheme.
-// This function, however, does not use MetricNameRE for the check but a much
-// faster hardcoded implementation.
-func IsValidLegacyMetricName(n string) bool {
- if len(n) == 0 {
- return false
- }
- for i, b := range n {
- if !isValidLegacyRune(b, i) {
- return false
- }
- }
- return true
-}
-
-// EscapeMetricFamily escapes the given metric names and labels with the given
-// escaping scheme. Returns a new object that uses the same pointers to fields
-// when possible and creates new escaped versions so as not to mutate the
-// input.
-func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
- if v == nil {
- return nil
- }
-
- if scheme == NoEscaping {
- return v
- }
-
- out := &dto.MetricFamily{
- Help: v.Help,
- Type: v.Type,
- Unit: v.Unit,
- }
-
- // If the name is nil, copy as-is, don't try to escape.
- if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
- out.Name = v.Name
- } else {
- out.Name = proto.String(EscapeName(v.GetName(), scheme))
- }
- for _, m := range v.Metric {
- if !metricNeedsEscaping(m) {
- out.Metric = append(out.Metric, m)
- continue
- }
-
- escaped := &dto.Metric{
- Gauge: m.Gauge,
- Counter: m.Counter,
- Summary: m.Summary,
- Untyped: m.Untyped,
- Histogram: m.Histogram,
- TimestampMs: m.TimestampMs,
- }
-
- for _, l := range m.Label {
- if l.GetName() == MetricNameLabel {
- if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
- escaped.Label = append(escaped.Label, l)
- continue
- }
- escaped.Label = append(escaped.Label, &dto.LabelPair{
- Name: proto.String(MetricNameLabel),
- Value: proto.String(EscapeName(l.GetValue(), scheme)),
- })
- continue
- }
- if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
- escaped.Label = append(escaped.Label, l)
- continue
- }
- escaped.Label = append(escaped.Label, &dto.LabelPair{
- Name: proto.String(EscapeName(l.GetName(), scheme)),
- Value: l.Value,
- })
- }
- out.Metric = append(out.Metric, escaped)
- }
- return out
-}
-
-func metricNeedsEscaping(m *dto.Metric) bool {
- for _, l := range m.Label {
- if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
- return true
- }
- if !IsValidLegacyMetricName(l.GetName()) {
- return true
- }
- }
- return false
-}
-
-// EscapeName escapes the incoming name according to the provided escaping
-// scheme. Depending on the rules of escaping, this may cause no change in the
- // string that is returned (especially NoEscaping, which by definition is a
- // no-op). This function does not do any validation of the name.
-func EscapeName(name string, scheme EscapingScheme) string {
- if len(name) == 0 {
- return name
- }
- var escaped strings.Builder
- switch scheme {
- case NoEscaping:
- return name
- case UnderscoreEscaping:
- if IsValidLegacyMetricName(name) {
- return name
- }
- for i, b := range name {
- if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else {
- escaped.WriteRune('_')
- }
- }
- return escaped.String()
- case DotsEscaping:
- // Do not return early for legacy-valid names; we still escape underscores.
- for i, b := range name {
- if b == '_' {
- escaped.WriteString("__")
- } else if b == '.' {
- escaped.WriteString("_dot_")
- } else if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else {
- escaped.WriteString("__")
- }
- }
- return escaped.String()
- case ValueEncodingEscaping:
- if IsValidLegacyMetricName(name) {
- return name
- }
- escaped.WriteString("U__")
- for i, b := range name {
- if b == '_' {
- escaped.WriteString("__")
- } else if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else if !utf8.ValidRune(b) {
- escaped.WriteString("_FFFD_")
- } else {
- escaped.WriteRune('_')
- escaped.WriteString(strconv.FormatInt(int64(b), 16))
- escaped.WriteRune('_')
- }
- }
- return escaped.String()
- default:
- panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
- }
-}
-
-// lower function taken from strconv.atoi
-func lower(c byte) byte {
- return c | ('x' - 'X')
-}
-
-// UnescapeName unescapes the incoming name according to the provided escaping
- // scheme if possible. Some schemes are partially or totally non-round-trippable.
- // If any error is encountered, the original input is returned.
-func UnescapeName(name string, scheme EscapingScheme) string {
- if len(name) == 0 {
- return name
- }
- switch scheme {
- case NoEscaping:
- return name
- case UnderscoreEscaping:
- // It is not possible to unescape from underscore replacement.
- return name
- case DotsEscaping:
- name = strings.ReplaceAll(name, "_dot_", ".")
- name = strings.ReplaceAll(name, "__", "_")
- return name
- case ValueEncodingEscaping:
- escapedName, found := strings.CutPrefix(name, "U__")
- if !found {
- return name
- }
-
- var unescaped strings.Builder
- TOP:
- for i := 0; i < len(escapedName); i++ {
- // All non-underscores are treated normally.
- if escapedName[i] != '_' {
- unescaped.WriteByte(escapedName[i])
- continue
- }
- i++
- if i >= len(escapedName) {
- return name
- }
- // A double underscore is a single underscore.
- if escapedName[i] == '_' {
- unescaped.WriteByte('_')
- continue
- }
- // We think we are in a UTF-8 escape sequence; process it.
- var utf8Val uint
- for j := 0; i < len(escapedName); j++ {
- // This is too many characters for a utf8 value based on the MaxRune
- // value of '\U0010FFFF'.
- if j >= 6 {
- return name
- }
- // Found a closing underscore, convert to a rune, check validity, and append.
- if escapedName[i] == '_' {
- utf8Rune := rune(utf8Val)
- if !utf8.ValidRune(utf8Rune) {
- return name
- }
- unescaped.WriteRune(utf8Rune)
- continue TOP
- }
- r := lower(escapedName[i])
- utf8Val *= 16
- if r >= '0' && r <= '9' {
- utf8Val += uint(r) - '0'
- } else if r >= 'a' && r <= 'f' {
- utf8Val += uint(r) - 'a' + 10
- } else {
- return name
- }
- i++
- }
- // Didn't find closing underscore, invalid.
- return name
- }
- return unescaped.String()
- default:
- panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
- }
-}
-
-func isValidLegacyRune(b rune, i int) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
-}
-
-func (e EscapingScheme) String() string {
- switch e {
- case NoEscaping:
- return AllowUTF8
- case UnderscoreEscaping:
- return EscapeUnderscores
- case DotsEscaping:
- return EscapeDots
- case ValueEncodingEscaping:
- return EscapeValues
- default:
- panic(fmt.Sprintf("unknown format scheme %d", e))
- }
-}
-
-func ToEscapingScheme(s string) (EscapingScheme, error) {
- if s == "" {
- return NoEscaping, errors.New("got empty string instead of escaping scheme")
- }
- switch s {
- case AllowUTF8:
- return NoEscaping, nil
- case EscapeUnderscores:
- return UnderscoreEscaping, nil
- case EscapeDots:
- return DotsEscaping, nil
- case EscapeValues:
- return ValueEncodingEscaping, nil
- default:
- return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
- }
-}
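
A round-trip sketch of the escaping schemes above. UnderscoreEscaping is lossy by design (UnescapeName is a no-op for it), while ValueEncodingEscaping round-trips; the expected outputs below are my reading of EscapeName and UnescapeName under the default validation settings:

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    name := "http.requests.total"

    fmt.Println(model.EscapeName(name, model.UnderscoreEscaping))
    // http_requests_total (the dots are gone for good)

    fmt.Println(model.EscapeName(name, model.DotsEscaping))
    // http_dot_requests_dot_total

    v := model.EscapeName(name, model.ValueEncodingEscaping)
    fmt.Println(v) // U__http_2e_requests_2e_total ('.' is U+002E)
    fmt.Println(model.UnescapeName(v, model.ValueEncodingEscaping))
    // http.requests.total
}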
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
deleted file mode 100644
index a7b969170..000000000
--- a/vendor/github.com/prometheus/common/model/model.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package model contains common data structures that are shared across
-// Prometheus components and libraries.
-package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
deleted file mode 100644
index dc8a0026c..000000000
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "sort"
-)
-
-// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
-// used to separate label names, label values, and other strings from each other
-// when calculating their combined hash value (aka signature aka fingerprint).
-const SeparatorByte byte = 255
-
-// cache the signature of an empty label set.
-var emptyLabelSignature = hashNew()
-
-// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
-// given label set. (Collisions are possible but unlikely if the number of label
-// sets the function is applied to is small.)
-func LabelsToSignature(labels map[string]string) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make([]string, 0, len(labels))
- for labelName := range labels {
- labelNames = append(labelNames, labelName)
- }
- sort.Strings(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, labelName)
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, labels[labelName])
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
-// parameter (rather than a label map) and returns a Fingerprint.
-func labelSetToFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- labelNames := make(LabelNames, 0, len(ls))
- for labelName := range ls {
- labelNames = append(labelNames, labelName)
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(ls[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return Fingerprint(sum)
-}
-
- // labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
- // a faster and less allocation-heavy hash function, which is more susceptible
- // to hash collisions. Therefore, collision detection should be applied.
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- var result uint64
- for labelName, labelValue := range ls {
- sum := hashNew()
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(labelValue))
- result ^= sum
- }
- return Fingerprint(result)
-}
-
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- sort.Sort(LabelNames(labels))
-
- sum := hashNew()
- for _, label := range labels {
- sum = hashAdd(sum, string(label))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[label]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
- if len(m) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make(LabelNames, 0, len(m))
- for labelName := range m {
- if _, exclude := labels[labelName]; !exclude {
- labelNames = append(labelNames, labelName)
- }
- }
- if len(labelNames) == 0 {
- return emptyLabelSignature
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
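
Two properties of the signature functions above are worth spelling out: label names are sorted before hashing, so map iteration order cannot change the result, and SignatureWithoutLabels lets callers group series while ignoring chosen labels. A minimal sketch:

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    a := map[string]string{"job": "api", "instance": "a:9090"}
    b := map[string]string{"instance": "a:9090", "job": "api"}
    fmt.Println(model.LabelsToSignature(a) == model.LabelsToSignature(b)) // true

    m := model.Metric{"__name__": "up", "instance": "a:9090", "job": "api"}
    ignore := map[model.LabelName]struct{}{"instance": {}}
    // Every instance of the same job/name pair hashes to the same value here.
    fmt.Println(model.SignatureWithoutLabels(m, ignore))
}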
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
deleted file mode 100644
index 8f91a9702..000000000
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
- "time"
-)
-
- // Matcher describes a match condition for the value of a given label.
-type Matcher struct {
- Name LabelName `json:"name"`
- Value string `json:"value"`
- IsRegex bool `json:"isRegex"`
-}
-
-func (m *Matcher) UnmarshalJSON(b []byte) error {
- type plain Matcher
- if err := json.Unmarshal(b, (*plain)(m)); err != nil {
- return err
- }
-
- if len(m.Name) == 0 {
- return errors.New("label name in matcher must not be empty")
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return err
- }
- }
- return nil
-}
-
- // Validate returns nil iff all fields of the matcher have valid values.
-func (m *Matcher) Validate() error {
- if !m.Name.IsValid() {
- return fmt.Errorf("invalid name %q", m.Name)
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return fmt.Errorf("invalid regular expression %q", m.Value)
- }
- } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
- return fmt.Errorf("invalid value %q", m.Value)
- }
- return nil
-}
-
-// Silence defines the representation of a silence definition in the Prometheus
-// eco-system.
-type Silence struct {
- ID uint64 `json:"id,omitempty"`
-
- Matchers []*Matcher `json:"matchers"`
-
- StartsAt time.Time `json:"startsAt"`
- EndsAt time.Time `json:"endsAt"`
-
- CreatedAt time.Time `json:"createdAt,omitempty"`
- CreatedBy string `json:"createdBy"`
- Comment string `json:"comment,omitempty"`
-}
-
- // Validate returns nil iff all fields of the silence have valid values.
-func (s *Silence) Validate() error {
- if len(s.Matchers) == 0 {
- return errors.New("at least one matcher required")
- }
- for _, m := range s.Matchers {
- if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %w", err)
- }
- }
- if s.StartsAt.IsZero() {
- return errors.New("start time missing")
- }
- if s.EndsAt.IsZero() {
- return errors.New("end time missing")
- }
- if s.EndsAt.Before(s.StartsAt) {
- return errors.New("start time must be before end time")
- }
- if s.CreatedBy == "" {
- return errors.New("creator information missing")
- }
- if s.Comment == "" {
- return errors.New("comment missing")
- }
- if s.CreatedAt.IsZero() {
- return errors.New("creation timestamp missing")
- }
- return nil
-}
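
A sketch of a silence that passes every check in Validate above (the matcher, times, and author are made up; drop any field to see the corresponding error):

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

func main() {
    now := time.Now()
    s := model.Silence{
        Matchers: []*model.Matcher{
            {Name: "job", Value: "api-.*", IsRegex: true},
        },
        StartsAt:  now,
        EndsAt:    now.Add(2 * time.Hour),
        CreatedAt: now,
        CreatedBy: "oncall@example.com",
        Comment:   "planned maintenance",
    }
    fmt.Println(s.Validate()) // <nil>
}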
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
deleted file mode 100644
index 5727452c1..000000000
--- a/vendor/github.com/prometheus/common/model/time.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- // minimumTick is the minimum supported time resolution. This has to be at
- // most time.Second in order for the code below to work.
- minimumTick = time.Millisecond
- // second is the Time duration equivalent to one second.
- second = int64(time.Second / minimumTick)
- // The number of nanoseconds per minimum tick.
- nanosPerTick = int64(minimumTick / time.Nanosecond)
-
- // Earliest is the earliest Time representable. Handy for
- // initializing a high watermark.
- Earliest = Time(math.MinInt64)
- // Latest is the latest Time representable. Handy for initializing
- // a low watermark.
- Latest = Time(math.MaxInt64)
-)
-
-// Time is the number of milliseconds since the epoch
-// (1970-01-01 00:00 UTC) excluding leap seconds.
-type Time int64
-
-// Interval describes an interval between two timestamps.
-type Interval struct {
- Start, End Time
-}
-
-// Now returns the current time as a Time.
-func Now() Time {
- return TimeFromUnixNano(time.Now().UnixNano())
-}
-
-// TimeFromUnix returns the Time equivalent to the Unix Time t
-// provided in seconds.
-func TimeFromUnix(t int64) Time {
- return Time(t * second)
-}
-
-// TimeFromUnixNano returns the Time equivalent to the Unix Time
-// t provided in nanoseconds.
-func TimeFromUnixNano(t int64) Time {
- return Time(t / nanosPerTick)
-}
-
-// Equal reports whether two Times represent the same instant.
-func (t Time) Equal(o Time) bool {
- return t == o
-}
-
-// Before reports whether the Time t is before o.
-func (t Time) Before(o Time) bool {
- return t < o
-}
-
-// After reports whether the Time t is after o.
-func (t Time) After(o Time) bool {
- return t > o
-}
-
-// Add returns the Time t + d.
-func (t Time) Add(d time.Duration) Time {
- return t + Time(d/minimumTick)
-}
-
-// Sub returns the Duration t - o.
-func (t Time) Sub(o Time) time.Duration {
- return time.Duration(t-o) * minimumTick
-}
-
-// Time returns the time.Time representation of t.
-func (t Time) Time() time.Time {
- return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
-}
-
-// Unix returns t as a Unix time, the number of seconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) Unix() int64 {
- return int64(t) / second
-}
-
-// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) UnixNano() int64 {
- return int64(t) * nanosPerTick
-}
-
-// The number of digits after the dot.
-var dotPrecision = int(math.Log10(float64(second)))
-
-// String returns a string representation of the Time.
-func (t Time) String() string {
- return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (t Time) MarshalJSON() ([]byte, error) {
- return []byte(t.String()), nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (t *Time) UnmarshalJSON(b []byte) error {
- p := strings.Split(string(b), ".")
- switch len(p) {
- case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- *t = Time(v * second)
-
- case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- v *= second
-
- prec := dotPrecision - len(p[1])
- if prec < 0 {
- p[1] = p[1][:dotPrecision]
- } else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
- }
-
- va, err := strconv.ParseInt(p[1], 10, 32)
- if err != nil {
- return err
- }
-
- // If the value was something like -0.1, the negative sign is lost in the
- // parsing because of the leading zero; this ensures that we capture it.
- if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
- *t = Time(v+va) * -1
- } else {
- *t = Time(v + va)
- }
-
- default:
- return fmt.Errorf("invalid time %q", string(b))
- }
- return nil
-}
-
-// Duration wraps time.Duration. It is used to parse the custom duration format
-// from YAML.
-// This type should not propagate beyond the scope of input/output processing.
-type Duration time.Duration
-
-// Set implements pflag/flag.Value
-func (d *Duration) Set(s string) error {
- var err error
- *d, err = ParseDuration(s)
- return err
-}
-
-// Type implements pflag.Value
-func (d *Duration) Type() string {
- return "duration"
-}
-
-func isdigit(c byte) bool { return c >= '0' && c <= '9' }
-
-// Units are required to go in order from biggest to smallest.
-// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
-var unitMap = map[string]struct {
- pos int
- mult uint64
-}{
- "ms": {7, uint64(time.Millisecond)},
- "s": {6, uint64(time.Second)},
- "m": {5, uint64(time.Minute)},
- "h": {4, uint64(time.Hour)},
- "d": {3, uint64(24 * time.Hour)},
- "w": {2, uint64(7 * 24 * time.Hour)},
- "y": {1, uint64(365 * 24 * time.Hour)},
-}
-
-// ParseDuration parses a string into a time.Duration, assuming that a year
-// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(s string) (Duration, error) {
- switch s {
- case "0":
- // Allow 0 without a unit.
- return 0, nil
- case "":
- return 0, errors.New("empty duration string")
- }
-
- orig := s
- var dur uint64
- lastUnitPos := 0
-
- for s != "" {
- if !isdigit(s[0]) {
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- // Consume [0-9]*
- i := 0
- for ; i < len(s) && isdigit(s[i]); i++ {
- }
- v, err := strconv.ParseUint(s[:i], 10, 0)
- if err != nil {
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- s = s[i:]
-
- // Consume unit.
- for i = 0; i < len(s) && !isdigit(s[i]); i++ {
- }
- if i == 0 {
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- u := s[:i]
- s = s[i:]
- unit, ok := unitMap[u]
- if !ok {
- return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
- }
- if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- lastUnitPos = unit.pos
- // Check if the provided duration overflows time.Duration (> ~290 years).
- if v > 1<<63/unit.mult {
- return 0, errors.New("duration out of range")
- }
- dur += v * unit.mult
- if dur > 1<<63-1 {
- return 0, errors.New("duration out of range")
- }
- }
- return Duration(dur), nil
-}
-
-func (d Duration) String() string {
- var (
- ms = int64(time.Duration(d) / time.Millisecond)
- r = ""
- )
- if ms == 0 {
- return "0s"
- }
-
- f := func(unit string, mult int64, exact bool) {
- if exact && ms%mult != 0 {
- return
- }
- if v := ms / mult; v > 0 {
- r += fmt.Sprintf("%d%s", v, unit)
- ms -= v * mult
- }
- }
-
- // Only format years and weeks if the remainder is zero, as it is often
- // easier to read 90d than 12w6d.
- f("y", 1000*60*60*24*365, true)
- f("w", 1000*60*60*24*7, true)
-
- f("d", 1000*60*60*24, false)
- f("h", 1000*60*60, false)
- f("m", 1000*60, false)
- f("s", 1000, false)
- f("ms", 1, false)
-
- return r
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (d Duration) MarshalJSON() ([]byte, error) {
- return json.Marshal(d.String())
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (d *Duration) UnmarshalJSON(bytes []byte) error {
- var s string
- if err := json.Unmarshal(bytes, &s); err != nil {
- return err
- }
- dur, err := ParseDuration(s)
- if err != nil {
- return err
- }
- *d = dur
- return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (d *Duration) MarshalText() ([]byte, error) {
- return []byte(d.String()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (d *Duration) UnmarshalText(text []byte) error {
- var err error
- *d, err = ParseDuration(string(text))
- return err
-}
-
-// MarshalYAML implements the yaml.Marshaler interface.
-func (d Duration) MarshalYAML() (interface{}, error) {
- return d.String(), nil
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- dur, err := ParseDuration(s)
- if err != nil {
- return err
- }
- *d = dur
- return nil
-}
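
A sketch of the duration parsing and the millisecond-resolution time arithmetic above; note the big-to-small unit ordering rule:

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

func main() {
    d, err := model.ParseDuration("1h30m")
    if err != nil {
        panic(err)
    }
    fmt.Println(d) // 1h30m

    // Units must appear from biggest to smallest, so this is rejected:
    _, err = model.ParseDuration("30m1h")
    fmt.Println(err) // not a valid duration string: "30m1h"

    t := model.TimeFromUnix(1700000000)
    fmt.Println(t.Add(time.Duration(d)).Sub(t)) // 1h30m0s
}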
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
deleted file mode 100644
index 8050637d8..000000000
--- a/vendor/github.com/prometheus/common/model/value.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "sort"
- "strconv"
- "strings"
-)
-
-// ZeroSample is the pseudo zero-value of Sample used to signal a
-// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
-// and metric nil. Note that the natural zero value of Sample has a timestamp
-// of 0, which is possible to appear in a real Sample and thus not suitable
-// to signal a non-existing Sample.
-var ZeroSample = Sample{Timestamp: Earliest}
-
-// Sample is a sample pair associated with a metric. A single sample must either
-// define Value or Histogram but not both. Histogram == nil implies the Value
-// field is used, otherwise it should be ignored.
-type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
- Histogram *SampleHistogram `json:"histogram"`
-}
-
-// Equal compares first the metrics, then the timestamp, then the value. The
-// semantics of value equality is defined by SampleValue.Equal.
-func (s *Sample) Equal(o *Sample) bool {
- if s == o {
- return true
- }
-
- if !s.Metric.Equal(o.Metric) {
- return false
- }
- if !s.Timestamp.Equal(o.Timestamp) {
- return false
- }
- if s.Histogram != nil {
- return s.Histogram.Equal(o.Histogram)
- }
- return s.Value.Equal(o.Value)
-}
-
-func (s Sample) String() string {
- if s.Histogram != nil {
- return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
- Timestamp: s.Timestamp,
- Histogram: s.Histogram,
- })
- }
- return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- })
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Sample) MarshalJSON() ([]byte, error) {
- if s.Histogram != nil {
- v := struct {
- Metric Metric `json:"metric"`
- Histogram SampleHistogramPair `json:"histogram"`
- }{
- Metric: s.Metric,
- Histogram: SampleHistogramPair{
- Timestamp: s.Timestamp,
- Histogram: s.Histogram,
- },
- }
- return json.Marshal(&v)
- }
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- }
- return json.Marshal(&v)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Sample) UnmarshalJSON(b []byte) error {
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- Histogram SampleHistogramPair `json:"histogram"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- Histogram: SampleHistogramPair{
- Timestamp: s.Timestamp,
- Histogram: s.Histogram,
- },
- }
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- s.Metric = v.Metric
- if v.Histogram.Histogram != nil {
- s.Timestamp = v.Histogram.Timestamp
- s.Histogram = v.Histogram.Histogram
- } else {
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
- }
-
- return nil
-}
-
-// Samples is a sortable Sample slice. It implements sort.Interface.
-type Samples []*Sample
-
-func (s Samples) Len() int {
- return len(s)
-}
-
-// Less compares first the metrics, then the timestamp.
-func (s Samples) Less(i, j int) bool {
- switch {
- case s[i].Metric.Before(s[j].Metric):
- return true
- case s[j].Metric.Before(s[i].Metric):
- return false
- case s[i].Timestamp.Before(s[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-func (s Samples) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (s Samples) Equal(o Samples) bool {
- if len(s) != len(o) {
- return false
- }
-
- for i, sample := range s {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
- // SampleStream is a stream of Values belonging to an attached Metric.
-type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- Histograms []SampleHistogramPair `json:"histograms"`
-}
-
-func (ss SampleStream) String() string {
- valuesLength := len(ss.Values)
- vals := make([]string, valuesLength+len(ss.Histograms))
- for i, v := range ss.Values {
- vals[i] = v.String()
- }
- for i, v := range ss.Histograms {
- vals[i+valuesLength] = v.String()
- }
- return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
-}
-
-func (ss SampleStream) MarshalJSON() ([]byte, error) {
- if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
- v := struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- Histograms []SampleHistogramPair `json:"histograms"`
- }{
- Metric: ss.Metric,
- Values: ss.Values,
- Histograms: ss.Histograms,
- }
- return json.Marshal(&v)
- } else if len(ss.Histograms) > 0 {
- v := struct {
- Metric Metric `json:"metric"`
- Histograms []SampleHistogramPair `json:"histograms"`
- }{
- Metric: ss.Metric,
- Histograms: ss.Histograms,
- }
- return json.Marshal(&v)
- } else {
- v := struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- }{
- Metric: ss.Metric,
- Values: ss.Values,
- }
- return json.Marshal(&v)
- }
-}
-
-func (ss *SampleStream) UnmarshalJSON(b []byte) error {
- v := struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- Histograms []SampleHistogramPair `json:"histograms"`
- }{
- Metric: ss.Metric,
- Values: ss.Values,
- Histograms: ss.Histograms,
- }
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- ss.Metric = v.Metric
- ss.Values = v.Values
- ss.Histograms = v.Histograms
-
- return nil
-}
-
-// Scalar is a scalar value evaluated at the set timestamp.
-type Scalar struct {
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s Scalar) String() string {
- return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Scalar) MarshalJSON() ([]byte, error) {
- v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
- return json.Marshal([...]interface{}{s.Timestamp, string(v)})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Scalar) UnmarshalJSON(b []byte) error {
- var f string
- v := [...]interface{}{&s.Timestamp, &f}
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- value, err := strconv.ParseFloat(f, 64)
- if err != nil {
- return fmt.Errorf("error parsing sample value: %w", err)
- }
- s.Value = SampleValue(value)
- return nil
-}
-
-// String is a string value evaluated at the set timestamp.
-type String struct {
- Value string `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s *String) String() string {
- return s.Value
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s String) MarshalJSON() ([]byte, error) {
- return json.Marshal([]interface{}{s.Timestamp, s.Value})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *String) UnmarshalJSON(b []byte) error {
- v := [...]interface{}{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Vector is basically only an alias for Samples, but the
-// contract is that in a Vector, all Samples have the same timestamp.
-type Vector []*Sample
-
-func (vec Vector) String() string {
- entries := make([]string, len(vec))
- for i, s := range vec {
- entries[i] = s.String()
- }
- return strings.Join(entries, "\n")
-}
-
-func (vec Vector) Len() int { return len(vec) }
-func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
-
-// Less compares first the metrics, then the timestamp.
-func (vec Vector) Less(i, j int) bool {
- switch {
- case vec[i].Metric.Before(vec[j].Metric):
- return true
- case vec[j].Metric.Before(vec[i].Metric):
- return false
- case vec[i].Timestamp.Before(vec[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (vec Vector) Equal(o Vector) bool {
- if len(vec) != len(o) {
- return false
- }
-
- for i, sample := range vec {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-// Matrix is a list of time series.
-type Matrix []*SampleStream
-
-func (m Matrix) Len() int { return len(m) }
-func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
-func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-
-func (mat Matrix) String() string {
- matCp := make(Matrix, len(mat))
- copy(matCp, mat)
- sort.Sort(matCp)
-
- strs := make([]string, len(matCp))
-
- for i, ss := range matCp {
- strs[i] = ss.String()
- }
-
- return strings.Join(strs, "\n")
-}
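
The MarshalJSON/UnmarshalJSON pair above switches wire shape depending on whether a histogram is present. For a plain float sample the encoding looks like this (a sketch; encoding/json sorts map keys, which fixes the label order):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    s := model.Sample{
        Metric:    model.Metric{"__name__": "up", "job": "api"},
        Value:     1,
        Timestamp: model.TimeFromUnix(1700000000),
    }
    b, err := json.Marshal(s)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
    // {"metric":{"__name__":"up","job":"api"},"value":[1700000000,"1"]}

    var back model.Sample
    if err := json.Unmarshal(b, &back); err != nil {
        panic(err)
    }
    fmt.Println(back.Equal(&s)) // true
}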
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
deleted file mode 100644
index 6bfc757d1..000000000
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "strconv"
-)
-
-// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
-// non-existing sample pair. It is a SamplePair with timestamp Earliest and
-// value 0.0. Note that the natural zero value of SamplePair has a timestamp
-// of 0, which is possible to appear in a real SamplePair and thus not
-// suitable to signal a non-existing SamplePair.
-var ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return errors.New("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
- // Equal returns true if the values of v and o are equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
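
The NaN behavior documented on Equal above is easy to trip over; a two-line sketch:

package main

import (
    "fmt"
    "math"

    "github.com/prometheus/common/model"
)

func main() {
    nan := model.SampleValue(math.NaN())
    fmt.Println(nan == nan)     // false: IEEE 754 comparison
    fmt.Println(nan.Equal(nan)) // true: Equal treats two NaNs as equal
}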
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
deleted file mode 100644
index 895e6a3e8..000000000
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
- "strings"
-)
-
-type FloatString float64
-
-func (v FloatString) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-func (v FloatString) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-func (v *FloatString) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return errors.New("float value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = FloatString(f)
- return nil
-}
-
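-// HistogramBucket represents one bucket of a histogram. As implemented in the
-// String method below, Boundaries encodes which bucket bounds are inclusive:
-// 0 means only the upper bound is, 1 means only the lower bound is, 2 means
-// neither is, and 3 means both are.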
-type HistogramBucket struct {
- Boundaries int32
- Lower FloatString
- Upper FloatString
- Count FloatString
-}
-
-func (s HistogramBucket) MarshalJSON() ([]byte, error) {
- b, err := json.Marshal(s.Boundaries)
- if err != nil {
- return nil, err
- }
- l, err := json.Marshal(s.Lower)
- if err != nil {
- return nil, err
- }
- u, err := json.Marshal(s.Upper)
- if err != nil {
- return nil, err
- }
- c, err := json.Marshal(s.Count)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
-}
-
-func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
- tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
- wantLen := len(tmp)
- if err := json.Unmarshal(buf, &tmp); err != nil {
- return err
- }
- if gotLen := len(tmp); gotLen != wantLen {
- return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
- }
- return nil
-}
-
-func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
- return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
-}
-
-func (b HistogramBucket) String() string {
- var sb strings.Builder
- lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
- upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
- if lowerInclusive {
- sb.WriteRune('[')
- } else {
- sb.WriteRune('(')
- }
- fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
- if upperInclusive {
- sb.WriteRune(']')
- } else {
- sb.WriteRune(')')
- }
- fmt.Fprintf(&sb, ":%v", b.Count)
- return sb.String()
-}
-
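-// HistogramBuckets is a list of histogram buckets.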
-type HistogramBuckets []*HistogramBucket
-
-func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
- if len(s) != len(o) {
- return false
- }
-
- for i, bucket := range s {
- if !bucket.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
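-// SampleHistogram represents a histogram sample value, consisting of the
-// total count of observations, their sum, and the buckets.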
-type SampleHistogram struct {
- Count FloatString `json:"count"`
- Sum FloatString `json:"sum"`
- Buckets HistogramBuckets `json:"buckets"`
-}
-
-func (s SampleHistogram) String() string {
- return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
-}
-
-func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
- return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
-}
-
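-// SampleHistogramPair pairs a SampleHistogram with a Timestamp.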
-type SampleHistogramPair struct {
- Timestamp Time
-	// Histogram should never be nil; it is only stored as a pointer for efficiency.
- Histogram *SampleHistogram
-}
-
-func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
- if s.Histogram == nil {
- return nil, errors.New("histogram is nil")
- }
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Histogram)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
- tmp := []interface{}{&s.Timestamp, &s.Histogram}
- wantLen := len(tmp)
- if err := json.Unmarshal(buf, &tmp); err != nil {
- return err
- }
- if gotLen := len(tmp); gotLen != wantLen {
- return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
- }
- if s.Histogram == nil {
- return errors.New("histogram is null")
- }
- return nil
-}
-
-func (s SampleHistogramPair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
-}
-
-func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
- return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
-}
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
deleted file mode 100644
index 726c50ee6..000000000
--- a/vendor/github.com/prometheus/common/model/value_type.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
-}
-
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
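-// ValueType describes the type of a query evaluation Value.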
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
-
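-// UnmarshalJSON implements json.Unmarshaler.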
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
-}
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
deleted file mode 100644
index 7cc33ae4a..000000000
--- a/vendor/github.com/prometheus/procfs/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/testdata/fixtures/
-/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
deleted file mode 100644
index 126df9e67..000000000
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-linters:
- enable:
- - errcheck
- - godot
- - gosimple
- - govet
- - ineffassign
- - misspell
- - revive
- - staticcheck
- - testifylint
- - unused
-
-linters-settings:
- godot:
- capital: true
- exclude:
- # Ignore "See: URL"
- - 'See:'
- misspell:
- locale: US
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
deleted file mode 100644
index d325872bd..000000000
--- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Prometheus Community Code of Conduct
-
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
deleted file mode 100644
index 853eb9d49..000000000
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# Contributing
-
-Prometheus uses GitHub to manage reviews of pull requests.
-
-* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
-
-* If you have a trivial fix or improvement, go ahead and create a pull request,
- addressing (with `@...`) a suitable maintainer of this repository (see
- [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
-
-* If you plan to do something more involved, first discuss your ideas
- on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
- This will avoid unnecessary work and surely give you and us a good deal
- of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
-
-* Relevant coding style guidelines are the [Go Code Review
- Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
- and the _Formatting and style_ section of Peter Bourgon's [Go: Best
- Practices for Production
- Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
-
-* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
-
-## Steps to Contribute
-
-Should you wish to work on an issue, please claim it first by commenting on the GitHub issue to say that you want to work on it. This prevents duplicated effort by contributors on the same issue.
-
-Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on it and one of the maintainers will clarify. For a quicker response, contact us over [IRC](https://prometheus.io/community).
-
-To quickly compile and test your changes, run:
-```bash
-make test # Make sure all the tests pass before you commit and push :)
-```
-
-We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly, though; fixing the code to comply with the linter's recommendation is generally the preferred course of action.
-
-## Pull Request Checklist
-
-* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
-
-* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
-
-* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
-
-* Add tests relevant to the fixed bug or new feature.
-
-## Dependency management
-
-The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
-
-All dependencies are vendored in the `vendor/` directory.
-
-To add or update a dependency, use the `go get` command:
-
-```bash
-# Pick the latest tagged release.
-go get example.com/some/module/pkg
-
-# Pick a specific version.
-go get example.com/some/module/pkg@vX.Y.Z
-```
-
-Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
-
-```bash
-# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
-GO111MODULE=on go mod tidy
-
-GO111MODULE=on go mod vendor
-```
-
-You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
-
-## API Implementation Guidelines
-
-### Naming and Documentation
-
-Public functions and structs should normally be named according to the file(s) being read and parsed. For example,
-the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function
-should contain the path to the file(s) being read and a URL to the Linux kernel documentation describing the file(s).
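-
-For example, a godoc comment following this convention might look like the
-sketch below (the signature matches this package's `BuddyInfo`; the wording is
-illustrative):
-
-```go
-// BuddyInfo reads the buddyinfo statistics from /proc/buddyinfo.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) BuddyInfo() ([]BuddyInfo, error)
-```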
-
-### Reading vs. Parsing
-
-Most functionality in this library consists of reading files and then parsing the text into structured data. In most
-cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and
-a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested
-directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types
-such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files
-in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
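-
-A minimal sketch of this pattern, as a rough guide (the `Thing` type and the
-`/proc/thing` file are hypothetical; `FS` and `util.ReadFileNoStat` are the
-package's own):
-
-```go
-// Thing is a hypothetical result type, shown here only for illustration.
-type Thing struct{ Value string }
-
-// Thing returns the parsed contents of /proc/thing.
-func (fs FS) Thing() (Thing, error) {
-	data, err := util.ReadFileNoStat(fs.proc.Path("thing"))
-	if err != nil {
-		return Thing{}, err
-	}
-	return parseThing(bytes.NewReader(data))
-}
-
-// parseThing parses the contents of /proc/thing. Taking an io.Reader means
-// tests can feed fixture data in directly instead of reading the filesystem.
-func parseThing(r io.Reader) (Thing, error) {
-	s := bufio.NewScanner(r)
-	var t Thing
-	for s.Scan() {
-		t.Value = s.Text() // keep the last line, purely as a placeholder
-	}
-	return t, s.Err()
-}
-```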
-
-### /proc and /sys filesystem I/O
-
-The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.
-Many of the files are changing continuously and the data being read can in some cases change between subsequent
-reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
-to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
-full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
-the file.
-
-Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
-the full file, and then using a scanner on the `[]byte` or `string` containing the data.
-
-```go
- data, err := util.ReadFileNoStat("/proc/cpuinfo")
- if err != nil {
- return err
- }
- reader := bytes.NewReader(data)
- scanner := bufio.NewScanner(reader)
-```
-
-The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
-can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
-not bother to check the size of the file before reading.
-```go
- data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
- if err != nil {
-  return err
- }
-```
-
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
deleted file mode 100644
index e00f3b365..000000000
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ /dev/null
@@ -1,3 +0,0 @@
-* Johannes 'fish' Ziemke @discordianfish
-* Paul Gier @pgier
-* Ben Kochie @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
deleted file mode 100644
index 7edfe4d09..000000000
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-include Makefile.common
-
-%/.unpacked: %.ttar
- @echo ">> extracting fixtures $*"
- ./ttar -C $(dir $*) -x -f $*.ttar
- touch $@
-
-fixtures: testdata/fixtures/.unpacked
-
-update_fixtures:
- rm -vf testdata/fixtures/.unpacked
- ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
-
-.PHONY: build
-build:
-
-.PHONY: test
-test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
deleted file mode 100644
index 161729235..000000000
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# A common Makefile that includes rules to be reused in different prometheus projects.
-# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
-
-# Example usage:
-# Create the main Makefile in the root project directory.
-# include Makefile.common
-# customTarget:
-# @echo ">> Running customTarget"
-#
-
-# Ensure GOBIN is not set during build so that promu is installed to the correct path
-unexport GOBIN
-
-GO ?= go
-GOFMT ?= $(GO)fmt
-FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
-GOOPTS ?=
-GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
-GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
-
-GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
-PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
-
-PROMU := $(FIRST_GOPATH)/bin/promu
-pkgs = ./...
-
-ifeq (arm, $(GOHOSTARCH))
- GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
- GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
-else
- GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
-endif
-
-GOTEST := $(GO) test
-GOTEST_DIR :=
-ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum 2> /dev/null),)
- GOTEST_DIR := test-results
- GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
-endif
-endif
-
-PROMU_VERSION ?= 0.17.0
-PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
-
-SKIP_GOLANGCI_LINT :=
-GOLANGCI_LINT :=
-GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
-# windows isn't included here because of the path separator being different.
-ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
- ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
- # If we're in CI and there is an Actions file, that means the linter
- # is being run in Actions, so we don't need to run it here.
- ifneq (,$(SKIP_GOLANGCI_LINT))
- GOLANGCI_LINT :=
- else ifeq (,$(CIRCLE_JOB))
- GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
- else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
- GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
- endif
- endif
-endif
-
-PREFIX ?= $(shell pwd)
-BIN_DIR ?= $(shell pwd)
-DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
-DOCKERFILE_PATH ?= ./Dockerfile
-DOCKERBUILD_CONTEXT ?= ./
-DOCKER_REPO ?= prom
-
-DOCKER_ARCHS ?= amd64
-
-BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
-PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
-TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
-
-SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
-
-ifeq ($(GOHOSTARCH),amd64)
- ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
- # Only supported on amd64
- test-flags := -race
- endif
-endif
-
-# This rule is used to forward a target like "build" to "common-build". This
-# allows a new "build" target to be defined in a Makefile which includes this
-# one and override "common-build" without override warnings.
-%: common-% ;
-
-.PHONY: common-all
-common-all: precheck style check_license lint yamllint unused build test
-
-.PHONY: common-style
-common-style:
- @echo ">> checking code style"
- @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
- if [ -n "$${fmtRes}" ]; then \
- echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
- echo "Please ensure you are using $$($(GO) version) for formatting code."; \
- exit 1; \
- fi
-
-.PHONY: common-check_license
-common-check_license:
- @echo ">> checking license header"
- @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
- awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
- done); \
- if [ -n "$${licRes}" ]; then \
- echo "license header checking failed:"; echo "$${licRes}"; \
- exit 1; \
- fi
-
-.PHONY: common-deps
-common-deps:
- @echo ">> getting dependencies"
- $(GO) mod download
-
-.PHONY: update-go-deps
-update-go-deps:
- @echo ">> updating Go dependencies"
- @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
- $(GO) get -d $$m; \
- done
- $(GO) mod tidy
-
-.PHONY: common-test-short
-common-test-short: $(GOTEST_DIR)
- @echo ">> running short tests"
- $(GOTEST) -short $(GOOPTS) $(pkgs)
-
-.PHONY: common-test
-common-test: $(GOTEST_DIR)
- @echo ">> running all tests"
- $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
-
-$(GOTEST_DIR):
- @mkdir -p $@
-
-.PHONY: common-format
-common-format:
- @echo ">> formatting code"
- $(GO) fmt $(pkgs)
-
-.PHONY: common-vet
-common-vet:
- @echo ">> vetting code"
- $(GO) vet $(GOOPTS) $(pkgs)
-
-.PHONY: common-lint
-common-lint: $(GOLANGCI_LINT)
-ifdef GOLANGCI_LINT
- @echo ">> running golangci-lint"
- $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-endif
-
-.PHONY: common-lint-fix
-common-lint-fix: $(GOLANGCI_LINT)
-ifdef GOLANGCI_LINT
- @echo ">> running golangci-lint fix"
- $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
-endif
-
-.PHONY: common-yamllint
-common-yamllint:
- @echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint 2> /dev/null))
- @echo "yamllint not installed so skipping"
-else
- yamllint .
-endif
-
-# For backward-compatibility.
-.PHONY: common-staticcheck
-common-staticcheck: lint
-
-.PHONY: common-unused
-common-unused:
- @echo ">> running check for unused/missing packages in go.mod"
- $(GO) mod tidy
- @git diff --exit-code -- go.sum go.mod
-
-.PHONY: common-build
-common-build: promu
- @echo ">> building binaries"
- $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
-
-.PHONY: common-tarball
-common-tarball: promu
- @echo ">> building release tarball"
- $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
-
-.PHONY: common-docker-repo-name
-common-docker-repo-name:
- @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
-
-.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
-common-docker: $(BUILD_DOCKER_ARCHS)
-$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
- -f $(DOCKERFILE_PATH) \
- --build-arg ARCH="$*" \
- --build-arg OS="linux" \
- $(DOCKERBUILD_CONTEXT)
-
-.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
-common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
-$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
-
-DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
-.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
-common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
-$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
-
-.PHONY: common-docker-manifest
-common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
-
-.PHONY: promu
-promu: $(PROMU)
-
-$(PROMU):
- $(eval PROMU_TMP := $(shell mktemp -d))
- curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
- mkdir -p $(FIRST_GOPATH)/bin
- cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
- rm -r $(PROMU_TMP)
-
-.PHONY: proto
-proto:
- @echo ">> generating code from proto files"
- @./scripts/genproto.sh
-
-ifdef GOLANGCI_LINT
-$(GOLANGCI_LINT):
- mkdir -p $(FIRST_GOPATH)/bin
- curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
- | sed -e '/install -d/d' \
- | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
-endif
-
-.PHONY: precheck
-precheck::
-
-define PRECHECK_COMMAND_template =
-precheck:: $(1)_precheck
-
-PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
-.PHONY: $(1)_precheck
-$(1)_precheck:
- @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
- echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
- exit 1; \
- fi
-endef
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
deleted file mode 100644
index 53c5e9aa1..000000000
--- a/vendor/github.com/prometheus/procfs/NOTICE
+++ /dev/null
@@ -1,7 +0,0 @@
-procfs provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
-
-Copyright 2014-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
deleted file mode 100644
index 1224816c2..000000000
--- a/vendor/github.com/prometheus/procfs/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# procfs
-
-This package provides functions to retrieve system, kernel, and process
-metrics from the pseudo-filesystems /proc and /sys.
-
-*WARNING*: This package is a work in progress. Its API may still break in
-backwards-incompatible ways without warnings. Use it at your own risk.
-
-[Go Reference](https://pkg.go.dev/github.com/prometheus/procfs)
-[CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master)
-[Go Report Card](https://goreportcard.com/report/github.com/prometheus/procfs)
-
-## Usage
-
-The procfs library is organized by packages based on whether the gathered data is coming from
-/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
-/sys, or both. For example, CPU statistics are gathered from
-`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
-point is initialized, and then the stat information is read.
-
-```go
-fs, err := procfs.NewFS("/proc")
-stats, err := fs.Stat()
-```
-
-Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
-
-```go
-fs, err := blockdevice.NewFS("/proc", "/sys")
-stats, err := fs.ProcDiskstats()
-```
-
-## Package Organization
-
-The packages in this project are organized according to (1) whether the data comes from the `/proc` or
-`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
-can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
-is available in the `blockdevice` sub-package.
-
-## Building and Testing
-
-The procfs library is intended to be built as part of another application, so there are no distributable binaries.
-However, most of the API includes unit tests which can be run with `make test`.
-
-### Updating Test Fixtures
-
-The procfs library includes a set of test fixtures which include many example files from
-the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
-which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
-
-```bash
-rm -rf testdata/fixtures
-make test
-```
-
-Next, make the required changes to the extracted files in the `fixtures` directory. When
-the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
-based on the updated `fixtures` directory. Finally, verify the changes using
-`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
deleted file mode 100644
index fed02d85c..000000000
--- a/vendor/github.com/prometheus/procfs/SECURITY.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Reporting a security issue
-
-The Prometheus security policy, including how to report vulnerabilities, can be
-found here:
-
-<https://prometheus.io/docs/operating/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
deleted file mode 100644
index cdcc8a7cc..000000000
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-// Learned from include/uapi/linux/if_arp.h.
-const (
- // completed entry (ha valid).
- ATFComplete = 0x02
- // permanent entry.
- ATFPermanent = 0x04
- // Publish entry.
- ATFPublish = 0x08
- // Has requested trailers.
- ATFUseTrailers = 0x10
- // Obsoleted: Want to use a netmask (only for proxy entries).
- ATFNetmask = 0x20
-	// Don't answer these addresses.
- ATFDontPublish = 0x40
-)
-
-// ARPEntry contains a single row of the columnar data represented in
-// /proc/net/arp.
-type ARPEntry struct {
- // IP address
- IPAddr net.IP
- // MAC address
- HWAddr net.HardwareAddr
- // Name of the device
- Device string
- // Flags
- Flags byte
-}
-
-// GatherARPEntries retrieves all the ARP entries, parses the relevant
-// columns, and returns a slice of ARPEntry values.
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
- data, err := os.ReadFile(fs.proc.Path("net/arp"))
- if err != nil {
- return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
- }
-
- return parseARPEntries(data)
-}
-
-func parseARPEntries(data []byte) ([]ARPEntry, error) {
- lines := strings.Split(string(data), "\n")
- entries := make([]ARPEntry, 0)
- const (
- expectedDataWidth = 6
- expectedHeaderWidth = 9
- )
- for _, line := range lines {
- columns := strings.Fields(line)
- width := len(columns)
-
- if width == expectedHeaderWidth || width == 0 {
- continue
- } else if width == expectedDataWidth {
- entry, err := parseARPEntry(columns)
- if err != nil {
-				return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, columns, err)
- }
- entries = append(entries, entry)
- } else {
-			return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d", ErrFileParse, width, expectedDataWidth)
- }
-
- }
-
-	return entries, nil
-}
-
-func parseARPEntry(columns []string) (ARPEntry, error) {
- entry := ARPEntry{Device: columns[5]}
- ip := net.ParseIP(columns[0])
- entry.IPAddr = ip
-
- if mac, err := net.ParseMAC(columns[3]); err == nil {
- entry.HWAddr = mac
- } else {
- return ARPEntry{}, err
- }
-
- if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
- entry.Flags = byte(flags)
- } else {
- return ARPEntry{}, err
- }
-
- return entry, nil
-}
-
-// IsComplete returns true if the ARP entry is marked with the complete flag.
-func (entry *ARPEntry) IsComplete() bool {
- return entry.Flags&ATFComplete != 0
-}
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
deleted file mode 100644
index 838075009..000000000
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// A BuddyInfo is the details parsed from /proc/buddyinfo.
-// The data consists of an array of counts of free fragments of each size.
-// The sizes are 2^n*PAGE_SIZE, where n is the array index.
-type BuddyInfo struct {
- Node string
- Zone string
- Sizes []float64
-}
-
-// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
- file, err := os.Open(fs.proc.Path("buddyinfo"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return parseBuddyInfo(file)
-}
-
-func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
- var (
- buddyInfo = []BuddyInfo{}
- scanner = bufio.NewScanner(r)
- bucketCount = -1
- )
-
- for scanner.Scan() {
- var err error
- line := scanner.Text()
- parts := strings.Fields(line)
-
- if len(parts) < 4 {
- return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
- }
-
- node := strings.TrimSuffix(parts[1], ",")
- zone := strings.TrimSuffix(parts[3], ",")
- arraySize := len(parts[4:])
-
- if bucketCount == -1 {
- bucketCount = arraySize
- } else {
- if bucketCount != arraySize {
- return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
- }
- }
-
- sizes := make([]float64, arraySize)
- for i := 0; i < arraySize; i++ {
- sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
- if err != nil {
-				return nil, fmt.Errorf("%w: Invalid value in buddyinfo: %q: %w", ErrFileParse, parts[i+4], err)
- }
- }
-
- buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
- }
-
- return buddyInfo, scanner.Err()
-}
diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go
deleted file mode 100644
index bf4f3b48c..000000000
--- a/vendor/github.com/prometheus/procfs/cmdline.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CmdLine returns the command line of the kernel.
-func (fs FS) CmdLine() ([]string, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
- if err != nil {
- return nil, err
- }
-
- return strings.Fields(string(data)), nil
-}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
deleted file mode 100644
index f0950bb49..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ /dev/null
@@ -1,519 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
-type CPUInfo struct {
- Processor uint
- VendorID string
- CPUFamily string
- Model string
- ModelName string
- Stepping string
- Microcode string
- CPUMHz float64
- CacheSize string
- PhysicalID string
- Siblings uint
- CoreID string
- CPUCores uint
- APICID string
- InitialAPICID string
- FPU string
- FPUException string
- CPUIDLevel uint
- WP string
- Flags []string
- Bugs []string
- BogoMips float64
- CLFlushSize uint
- CacheAlignment uint
- AddressSizes string
- PowerManagement string
-}
-
-var (
- cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
- cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
-)
-
-// CPUInfo returns information about current system CPUs.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) CPUInfo() ([]CPUInfo, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
- if err != nil {
- return nil, err
- }
- return parseCPUInfo(data)
-}
-
-func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- // find the first "processor" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "vendor", "vendor_id":
- cpuinfo[i].VendorID = field[1]
- case "cpu family":
- cpuinfo[i].CPUFamily = field[1]
- case "model":
- cpuinfo[i].Model = field[1]
- case "model name":
- cpuinfo[i].ModelName = field[1]
- case "stepping":
- cpuinfo[i].Stepping = field[1]
- case "microcode":
- cpuinfo[i].Microcode = field[1]
- case "cpu MHz":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- case "cache size":
- cpuinfo[i].CacheSize = field[1]
- case "physical id":
- cpuinfo[i].PhysicalID = field[1]
- case "siblings":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Siblings = uint(v)
- case "core id":
- cpuinfo[i].CoreID = field[1]
- case "cpu cores":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUCores = uint(v)
- case "apicid":
- cpuinfo[i].APICID = field[1]
- case "initial apicid":
- cpuinfo[i].InitialAPICID = field[1]
- case "fpu":
- cpuinfo[i].FPU = field[1]
- case "fpu_exception":
- cpuinfo[i].FPUException = field[1]
- case "cpuid level":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUIDLevel = uint(v)
- case "wp":
- cpuinfo[i].WP = field[1]
- case "flags":
- cpuinfo[i].Flags = strings.Fields(field[1])
- case "bugs":
- cpuinfo[i].Bugs = strings.Fields(field[1])
- case "bogomips":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- case "clflush size":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CLFlushSize = uint(v)
- case "cache_alignment":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CacheAlignment = uint(v)
- case "address sizes":
- cpuinfo[i].AddressSizes = field[1]
- case "power management":
- cpuinfo[i].PowerManagement = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
- if !match || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
-
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- featuresLine := ""
- commonCPUInfo := CPUInfo{}
- i := 0
- if strings.TrimSpace(field[0]) == "Processor" {
- commonCPUInfo = CPUInfo{ModelName: field[1]}
- i = -1
- } else {
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo = []CPUInfo{firstcpu}
- }
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "BogoMIPS":
- if i == -1 {
- cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
- i++
- cpuinfo[i].Processor = 0
- }
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- case "Features":
- featuresLine = line
- case "model name":
- cpuinfo[i].ModelName = field[1]
- }
- }
-	fields := strings.SplitN(featuresLine, ": ", 2)
-	if len(fields) == 2 {
-		for i := range cpuinfo {
-			cpuinfo[i].Flags = strings.Fields(fields[1])
-		}
-	}
-	return cpuinfo, nil
-}
-
-func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- commonCPUInfo := CPUInfo{VendorID: field[1]}
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "bogomips per cpu":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- commonCPUInfo.BogoMips = v
- case "features":
- commonCPUInfo.Flags = strings.Fields(field[1])
- }
- if strings.HasPrefix(line, "processor") {
- match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
- if len(match) < 2 {
-				return nil, fmt.Errorf("%w: %q", ErrFileParse, line)
- }
- cpu := commonCPUInfo
- v, err := strconv.ParseUint(match[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpu.Processor = uint(v)
- cpuinfo = append(cpuinfo, cpu)
- }
- if strings.HasPrefix(line, "cpu number") {
- break
- }
- }
-
- i := 0
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "cpu number":
- i++
- case "cpu MHz dynamic":
- clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
- v, err := strconv.ParseFloat(clock, 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- case "physical id":
- cpuinfo[i].PhysicalID = field[1]
- case "core id":
- cpuinfo[i].CoreID = field[1]
- case "cpu cores":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUCores = uint(v)
- case "siblings":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Siblings = uint(v)
- }
- }
-
- return cpuinfo, nil
-}
-
-func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
-	// find the first "system type" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- systemType := field[1]
-
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- cpuinfo[i].VendorID = systemType
- case "cpu model":
- cpuinfo[i].ModelName = field[1]
- case "BogoMIPS":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-	// find the first "system type" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- systemType := field[1]
- i := 0
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- cpuinfo[i].VendorID = systemType
- case "CPU Family":
- cpuinfo[i].CPUFamily = field[1]
- case "Model Name":
- cpuinfo[i].ModelName = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "cpu":
- cpuinfo[i].VendorID = field[1]
- case "clock":
- clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
- v, err := strconv.ParseFloat(clock, 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- case "hart":
- cpuinfo[i].CoreID = field[1]
- case "isa":
- cpuinfo[i].ModelName = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
- return nil, errors.New("not implemented")
-}
-
-// firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line.
-func firstNonEmptyLine(scanner *bufio.Scanner) string {
- for scanner.Scan() {
- line := scanner.Text()
- if strings.TrimSpace(line) != "" {
- return line
- }
- }
- return ""
-}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
deleted file mode 100644
index 64cfd534c..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (arm || arm64)
-// +build linux
-// +build arm arm64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoARM
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
deleted file mode 100644
index d88442f0e..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
deleted file mode 100644
index c11207f3a..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (mips || mipsle || mips64 || mips64le)
-// +build linux
-// +build mips mipsle mips64 mips64le
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
deleted file mode 100644
index a6b2b3127..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoDummy
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
deleted file mode 100644
index 003bc2ad4..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (ppc64 || ppc64le)
-// +build linux
-// +build ppc64 ppc64le
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoPPC
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
deleted file mode 100644
index 1c9b7313b..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (riscv || riscv64)
-// +build linux
-// +build riscv riscv64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoRISCV
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
deleted file mode 100644
index fa3686bc0..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoS390X
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
deleted file mode 100644
index a0ef55562..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (386 || amd64)
-// +build linux
-// +build 386 amd64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoX86
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
deleted file mode 100644
index 5f2a37a78..000000000
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Crypto holds info parsed from /proc/crypto.
-type Crypto struct {
- Alignmask *uint64
- Async bool
- Blocksize *uint64
- Chunksize *uint64
- Ctxsize *uint64
- Digestsize *uint64
- Driver string
- Geniv string
- Internal string
- Ivsize *uint64
- Maxauthsize *uint64
- MaxKeysize *uint64
- MinKeysize *uint64
- Module string
- Name string
- Priority *int64
- Refcnt *int64
- Seedsize *uint64
- Selftest string
- Type string
- Walksize *uint64
-}
-
-// Crypto parses a crypto file (/proc/crypto) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
-func (fs FS) Crypto() ([]Crypto, error) {
- path := fs.proc.Path("crypto")
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
-
- }
-
- crypto, err := parseCrypto(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
- }
-
- return crypto, nil
-}
-
-// parseCrypto parses a /proc/crypto stream into Crypto elements.
-func parseCrypto(r io.Reader) ([]Crypto, error) {
- var out []Crypto
-
- s := bufio.NewScanner(r)
- for s.Scan() {
- text := s.Text()
- switch {
- case strings.HasPrefix(text, "name"):
- // Each crypto element begins with its name.
- out = append(out, Crypto{})
- case text == "":
- continue
- }
-
- kv := strings.Split(text, ":")
- if len(kv) != 2 {
- return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
- }
-
- k := strings.TrimSpace(kv[0])
- v := strings.TrimSpace(kv[1])
-
- // Parse the key/value pair into the currently focused element.
- c := &out[len(out)-1]
- if err := c.parseKV(k, v); err != nil {
- return nil, err
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- return out, nil
-}
-
-// parseKV parses a key/value pair into the appropriate field of c.
-func (c *Crypto) parseKV(k, v string) error {
- vp := util.NewValueParser(v)
-
- switch k {
- case "async":
- // Interpret literal yes as true.
- c.Async = v == "yes"
- case "blocksize":
- c.Blocksize = vp.PUInt64()
- case "chunksize":
- c.Chunksize = vp.PUInt64()
- case "digestsize":
- c.Digestsize = vp.PUInt64()
- case "driver":
- c.Driver = v
- case "geniv":
- c.Geniv = v
- case "internal":
- c.Internal = v
- case "ivsize":
- c.Ivsize = vp.PUInt64()
- case "maxauthsize":
- c.Maxauthsize = vp.PUInt64()
- case "max keysize":
- c.MaxKeysize = vp.PUInt64()
- case "min keysize":
- c.MinKeysize = vp.PUInt64()
- case "module":
- c.Module = v
- case "name":
- c.Name = v
- case "priority":
- c.Priority = vp.PInt64()
- case "refcnt":
- c.Refcnt = vp.PInt64()
- case "seedsize":
- c.Seedsize = vp.PUInt64()
- case "selftest":
- c.Selftest = v
- case "type":
- c.Type = v
- case "walksize":
- c.Walksize = vp.PUInt64()
- }
-
- return vp.Err()
-}
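
A minimal usage sketch for the Crypto accessor above, via the public procfs package at the default /proc mount (output format here is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}

	// Each entry corresponds to one name-led stanza of /proc/crypto.
	for _, c := range entries {
		fmt.Printf("%s (driver=%s, module=%s)\n", c.Name, c.Driver, c.Module)
	}
}
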
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
deleted file mode 100644
index f9d961e44..000000000
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2014 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package procfs provides functions to retrieve system, kernel and process
-// metrics from the pseudo-filesystem proc.
-//
-// Example:
-//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.Stat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
deleted file mode 100644
index 4980c875b..000000000
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "github.com/prometheus/procfs/internal/fs"
-)
-
-// FS represents the pseudo-filesystem proc, which provides an interface to
-// kernel data structures.
-type FS struct {
- proc fs.FS
- isReal bool
-}
-
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
-
-// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
-// It will error if the mount point directory can't be read or is a file.
-func NewDefaultFS() (FS, error) {
- return NewFS(DefaultMountPoint)
-}
-
-// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
-// if the mount point directory can't be read or is a file.
-func NewFS(mountPoint string) (FS, error) {
- fs, err := fs.NewFS(mountPoint)
- if err != nil {
- return FS{}, err
- }
-
- isReal, err := isRealProc(mountPoint)
- if err != nil {
- return FS{}, err
- }
-
- return FS{fs, isReal}, nil
-}
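
A sketch of pointing the FS at a non-default mount; the "/host/proc" path is a hypothetical example, as used when the host's /proc is bind-mounted into a container:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Hypothetical bind-mounted host procfs path.
	fs, err := procfs.NewFS("/host/proc")
	if err != nil {
		log.Fatalf("could not open procfs: %v", err)
	}

	// All accessors then resolve paths relative to that mount point.
	la, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(la.Load1)
}
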
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
deleted file mode 100644
index 134767d69..000000000
--- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !freebsd && !linux
-// +build !freebsd,!linux
-
-package procfs
-
-// isRealProc returns true on platforms whose Statfs_t struct has no Type
-// field, where the procfs check cannot actually be performed.
-func isRealProc(mountPoint string) (bool, error) {
- return true, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
deleted file mode 100644
index 80df79c31..000000000
--- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build freebsd || linux
-// +build freebsd linux
-
-package procfs
-
-import (
- "syscall"
-)
-
-// isRealProc determines whether the supplied mountpoint is really a proc filesystem.
-func isRealProc(mountPoint string) (bool, error) {
- stat := syscall.Statfs_t{}
- err := syscall.Statfs(mountPoint, &stat)
- if err != nil {
- return false, err
- }
-
- // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
- return stat.Type == 0x9fa0, nil
-}
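
The magic-number check above can be reproduced standalone; a minimal sketch, assuming a Linux build:

package main

import (
	"fmt"
	"syscall"
)

// procSuperMagic mirrors PROC_SUPER_MAGIC from include/uapi/linux/magic.h.
const procSuperMagic = 0x9fa0

func main() {
	var stat syscall.Statfs_t
	if err := syscall.Statfs("/proc", &stat); err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Println("real procfs:", stat.Type == procSuperMagic)
}
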
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
deleted file mode 100644
index cf2e3eaa0..000000000
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Fscacheinfo represents fscache statistics.
-type Fscacheinfo struct {
- // Number of index cookies allocated
- IndexCookiesAllocated uint64
- // data storage cookies allocated
- DataStorageCookiesAllocated uint64
- // Number of special cookies allocated
- SpecialCookiesAllocated uint64
- // Number of objects allocated
- ObjectsAllocated uint64
- // Number of object allocation failures
- ObjectAllocationsFailure uint64
- // Number of objects that reached the available state
- ObjectsAvailable uint64
- // Number of objects that reached the dead state
- ObjectsDead uint64
- // Number of objects that didn't have a coherency check
- ObjectsWithoutCoherencyCheck uint64
- // Number of objects that passed a coherency check
- ObjectsWithCoherencyCheck uint64
- // Number of objects that needed a coherency data update
- ObjectsNeedCoherencyCheckUpdate uint64
- // Number of objects that were declared obsolete
- ObjectsDeclaredObsolete uint64
- // Number of pages marked as being cached
- PagesMarkedAsBeingCached uint64
- // Number of uncache page requests seen
- UncachePagesRequestSeen uint64
- // Number of acquire cookie requests seen
- AcquireCookiesRequestSeen uint64
- // Number of acq reqs given a NULL parent
- AcquireRequestsWithNullParent uint64
- // Number of acq reqs rejected due to no cache available
- AcquireRequestsRejectedNoCacheAvailable uint64
- // Number of acq reqs succeeded
- AcquireRequestsSucceeded uint64
- // Number of acq reqs rejected due to error
- AcquireRequestsRejectedDueToError uint64
- // Number of acq reqs failed on ENOMEM
- AcquireRequestsFailedDueToEnomem uint64
- // Number of lookup calls made on cache backends
- LookupsNumber uint64
- // Number of negative lookups made
- LookupsNegative uint64
- // Number of positive lookups made
- LookupsPositive uint64
- // Number of objects created by lookup
- ObjectsCreatedByLookup uint64
- // Number of lookups timed out and requeued
- LookupsTimedOutAndRequed uint64
- InvalidationsNumber uint64
- InvalidationsRunning uint64
- // Number of update cookie requests seen
- UpdateCookieRequestSeen uint64
- // Number of upd reqs given a NULL parent
- UpdateRequestsWithNullParent uint64
- // Number of upd reqs granted CPU time
- UpdateRequestsRunning uint64
- // Number of relinquish cookie requests seen
- RelinquishCookiesRequestSeen uint64
- // Number of rlq reqs given a NULL parent
- RelinquishCookiesWithNullParent uint64
- // Number of rlq reqs waited on completion of creation
- RelinquishRequestsWaitingCompleteCreation uint64
- // Relinqs rtr
- RelinquishRetries uint64
- // Number of attribute changed requests seen
- AttributeChangedRequestsSeen uint64
- // Number of attr changed requests queued
- AttributeChangedRequestsQueued uint64
- // Number of attr changed rejected -ENOBUFS
- AttributeChangedRejectDueToEnobufs uint64
- // Number of attr changed failed -ENOMEM
- AttributeChangedFailedDueToEnomem uint64
- // Number of attr changed ops given CPU time
- AttributeChangedOps uint64
- // Number of allocation requests seen
- AllocationRequestsSeen uint64
- // Number of successful alloc reqs
- AllocationOkRequests uint64
- // Number of alloc reqs that waited on lookup completion
- AllocationWaitingOnLookup uint64
- // Number of alloc reqs rejected -ENOBUFS
- AllocationsRejectedDueToEnobufs uint64
- // Number of alloc reqs aborted -ERESTARTSYS
- AllocationsAbortedDueToErestartsys uint64
- // Number of alloc reqs submitted
- AllocationOperationsSubmitted uint64
- // Number of alloc reqs waited for CPU time
- AllocationsWaitedForCPU uint64
- // Number of alloc reqs aborted due to object death
- AllocationsAbortedDueToObjectDeath uint64
- // Number of retrieval (read) requests seen
- RetrievalsReadRequests uint64
- // Number of successful retr reqs
- RetrievalsOk uint64
- // Number of retr reqs that waited on lookup completion
- RetrievalsWaitingLookupCompletion uint64
- // Number of retr reqs returned -ENODATA
- RetrievalsReturnedEnodata uint64
- // Number of retr reqs rejected -ENOBUFS
- RetrievalsRejectedDueToEnobufs uint64
- // Number of retr reqs aborted -ERESTARTSYS
- RetrievalsAbortedDueToErestartsys uint64
- // Number of retr reqs failed -ENOMEM
- RetrievalsFailedDueToEnomem uint64
- // Number of retr reqs submitted
- RetrievalsRequests uint64
- // Number of retr reqs waited for CPU time
- RetrievalsWaitingCPU uint64
- // Number of retr reqs aborted due to object death
- RetrievalsAbortedDueToObjectDeath uint64
- // Number of storage (write) requests seen
- StoreWriteRequests uint64
- // Number of successful store reqs
- StoreSuccessfulRequests uint64
- // Number of store reqs on a page already pending storage
- StoreRequestsOnPendingStorage uint64
- // Number of store reqs rejected -ENOBUFS
- StoreRequestsRejectedDueToEnobufs uint64
- // Number of store reqs failed -ENOMEM
- StoreRequestsFailedDueToEnomem uint64
- // Number of store reqs submitted
- StoreRequestsSubmitted uint64
- // Number of store reqs granted CPU time
- StoreRequestsRunning uint64
- // Number of pages given store req processing time
- StorePagesWithRequestsProcessing uint64
- // Number of store reqs deleted from tracking tree
- StoreRequestsDeleted uint64
- // Number of store reqs over store limit
- StoreRequestsOverStoreLimit uint64
- // Number of release reqs against pages with no pending store
- ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
- // Number of release reqs against pages stored by time lock granted
- ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
- // Number of release reqs ignored due to in-progress store
- ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
- PageStoresCancelledByReleaseRequests uint64
- VmscanWaiting uint64
- // Number of times async ops added to pending queues
- OpsPending uint64
- // Number of times async ops given CPU time
- OpsRunning uint64
- // Number of times async ops queued for processing
- OpsEnqueued uint64
- // Number of async ops cancelled
- OpsCancelled uint64
- // Number of async ops rejected due to object lookup/create failure
- OpsRejected uint64
- // Number of async ops initialised
- OpsInitialised uint64
- // Number of async ops queued for deferred release
- OpsDeferred uint64
- // Number of async ops released (should equal ini=N when idle)
- OpsReleased uint64
- // Number of deferred-release async ops garbage collected
- OpsGarbageCollected uint64
- // Number of in-progress alloc_object() cache ops
- CacheopAllocationsinProgress uint64
- // Number of in-progress lookup_object() cache ops
- CacheopLookupObjectInProgress uint64
- // Number of in-progress lookup_complete() cache ops
- CacheopLookupCompleteInPorgress uint64
- // Number of in-progress grab_object() cache ops
- CacheopGrabObjectInProgress uint64
- CacheopInvalidations uint64
- // Number of in-progress update_object() cache ops
- CacheopUpdateObjectInProgress uint64
- // Number of in-progress drop_object() cache ops
- CacheopDropObjectInProgress uint64
- // Number of in-progress put_object() cache ops
- CacheopPutObjectInProgress uint64
- // Number of in-progress attr_changed() cache ops
- CacheopAttributeChangeInProgress uint64
- // Number of in-progress sync_cache() cache ops
- CacheopSyncCacheInProgress uint64
- // Number of in-progress read_or_alloc_page() cache ops
- CacheopReadOrAllocPageInProgress uint64
- // Number of in-progress read_or_alloc_pages() cache ops
- CacheopReadOrAllocPagesInProgress uint64
- // Number of in-progress allocate_page() cache ops
- CacheopAllocatePageInProgress uint64
- // Number of in-progress allocate_pages() cache ops
- CacheopAllocatePagesInProgress uint64
- // Number of in-progress write_page() cache ops
- CacheopWritePagesInProgress uint64
- // Number of in-progress uncache_page() cache ops
- CacheopUncachePagesInProgress uint64
- // Number of in-progress dissociate_pages() cache ops
- CacheopDissociatePagesInProgress uint64
- // Number of object lookups/creations rejected due to lack of space
- CacheevLookupsAndCreationsRejectedLackSpace uint64
- // Number of stale objects deleted
- CacheevStaleObjectsDeleted uint64
- // Number of objects retired when relinquished
- CacheevRetiredWhenReliquished uint64
- // Number of objects culled
- CacheevObjectsCulled uint64
-}
-
-// Fscacheinfo returns information about current fscache statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
-func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
- if err != nil {
- return Fscacheinfo{}, err
- }
-
- m, err := parseFscacheinfo(bytes.NewReader(b))
- if err != nil {
- return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse fscache stats: %w", ErrFileParse, err)
- }
-
- return *m, nil
-}
-
-func setFSCacheFields(fields []string, setFields ...*uint64) error {
- var err error
- if len(fields) < len(setFields) {
- return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
- }
-
- for i := range setFields {
- *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
- var m Fscacheinfo
- s := bufio.NewScanner(r)
- for s.Scan() {
- fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
- }
-
- switch fields[0] {
- case "Cookies:":
- err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
- &m.SpecialCookiesAllocated)
- if err != nil {
- return &m, err
- }
- case "Objects:":
- err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
- &m.ObjectsAvailable, &m.ObjectsDead)
- if err != nil {
- return &m, err
- }
- case "ChkAux":
- err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
- &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
- if err != nil {
- return &m, err
- }
- case "Pages":
- err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
- if err != nil {
- return &m, err
- }
- case "Acquire:":
- err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
- &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
- &m.AcquireRequestsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- case "Lookups:":
- err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
- &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
- if err != nil {
- return &m, err
- }
- case "Invals":
- err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
- if err != nil {
- return &m, err
- }
- case "Updates:":
- err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
- &m.UpdateRequestsRunning)
- if err != nil {
- return &m, err
- }
- case "Relinqs:":
- err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
- &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
- if err != nil {
- return &m, err
- }
- case "AttrChg:":
- err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
- &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
- if err != nil {
- return &m, err
- }
- case "Allocs":
- if strings.Split(fields[2], "=")[0] == "n" {
- err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
- &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
- &m.AllocationsAbortedDueToObjectDeath)
- if err != nil {
- return &m, err
- }
- }
- case "Retrvls:":
- if strings.Split(fields[1], "=")[0] == "n" {
- err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
- &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
- &m.RetrievalsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
- if err != nil {
- return &m, err
- }
- }
- case "Stores":
- if strings.Split(fields[2], "=")[0] == "n" {
- err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
- &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
- &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
- if err != nil {
- return &m, err
- }
- }
- case "VmScan":
- err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
- &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
- &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
- if err != nil {
- return &m, err
- }
- case "Ops":
- if strings.Split(fields[2], "=")[0] == "pend" {
- err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
- if err != nil {
- return &m, err
- }
- }
- case "CacheOp:":
- if strings.Split(fields[1], "=")[0] == "alo" {
- err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
- &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
- if err != nil {
- return &m, err
- }
- } else if strings.Split(fields[1], "=")[0] == "inv" {
- err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
- &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
- &m.CacheopSyncCacheInProgress)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
- &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
- &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
- if err != nil {
- return &m, err
- }
- }
- case "CacheEv:":
- err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
- &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
- if err != nil {
- return &m, err
- }
- }
- }
-
- return &m, nil
-}
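
A minimal usage sketch for the exported accessor, with field names as defined in the struct above (the stats file only exists when fscache is enabled in the kernel):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
	fmt.Println("positive lookups:", info.LookupsPositive)
}
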
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
deleted file mode 100644
index 3c18c7610..000000000
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
- "fmt"
- "os"
- "path/filepath"
-)
-
-const (
- // DefaultProcMountPoint is the common mount point of the proc filesystem.
- DefaultProcMountPoint = "/proc"
-
- // DefaultSysMountPoint is the common mount point of the sys filesystem.
- DefaultSysMountPoint = "/sys"
-
- // DefaultConfigfsMountPoint is the common mount point of the configfs.
- DefaultConfigfsMountPoint = "/sys/kernel/config"
-)
-
-// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
-// interface to kernel data structures.
-type FS string
-
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
-func NewFS(mountPoint string) (FS, error) {
- info, err := os.Stat(mountPoint)
- if err != nil {
- return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
- }
- if !info.IsDir() {
- return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
- }
-
- return FS(mountPoint), nil
-}
-
-// Path appends the given path elements to the filesystem path, adding separators
-// as necessary.
-func (fs FS) Path(p ...string) string {
- return filepath.Join(append([]string{string(fs)}, p...)...)
-}
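
Path simply joins elements under the mount point; a sketch of the behavior, with the type inlined because internal packages are not importable from outside the module:

package main

import (
	"fmt"
	"path/filepath"
)

// FS mirrors the string-based type above.
type FS string

// Path appends the given path elements to the filesystem path.
func (fs FS) Path(p ...string) string {
	return filepath.Join(append([]string{string(fs)}, p...)...)
}

func main() {
	proc := FS("/proc")
	fmt.Println(proc.Path("net", "ip_vs_stats")) // /proc/net/ip_vs_stats
}
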
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
deleted file mode 100644
index 14272dc78..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "os"
- "strconv"
- "strings"
-)
-
-// ParseUint32s parses a slice of strings into a slice of uint32s.
-func ParseUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
-
-// ParseUint64s parses a slice of strings into a slice of uint64s.
-func ParseUint64s(ss []string) ([]uint64, error) {
- us := make([]uint64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, u)
- }
-
- return us, nil
-}
-
-// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
-func ParsePInt64s(ss []string) ([]*int64, error) {
- us := make([]*int64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, &u)
- }
-
- return us, nil
-}
-
-// ParseHexUint64s parses a slice of hexadecimal strings into a slice of uint64 pointers.
-func ParseHexUint64s(ss []string) ([]*uint64, error) {
- us := make([]*uint64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, &u)
- }
-
- return us, nil
-}
-
-// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
-func ReadUintFromFile(path string) (uint64, error) {
- data, err := os.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
-
-// ReadIntFromFile reads a file and attempts to parse an int64 from it.
-func ReadIntFromFile(path string) (int64, error) {
- data, err := os.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
-}
-
-// ParseBool parses a string into a boolean pointer.
-func ParseBool(b string) *bool {
- var truth bool
- switch b {
- case "enabled":
- truth = true
- case "disabled":
- truth = false
- default:
- return nil
- }
- return &truth
-}
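
A sketch of the three-way semantics of ParseBool, with the logic inlined since the internal util package cannot be imported from outside the procfs module:

package main

import "fmt"

// parseBool inlines util.ParseBool above: "enabled"/"disabled" map to a
// *bool, anything else to nil ("unknown").
func parseBool(b string) *bool {
	var truth bool
	switch b {
	case "enabled":
		truth = true
	case "disabled":
		truth = false
	default:
		return nil
	}
	return &truth
}

func main() {
	for _, s := range []string{"enabled", "disabled", "bogus"} {
		if v := parseBool(s); v != nil {
			fmt.Printf("%s -> %t\n", s, *v)
		} else {
			fmt.Printf("%s -> unknown\n", s)
		}
	}
}
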
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
deleted file mode 100644
index 71b7a70eb..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "io"
- "os"
-)
-
-// ReadFileNoStat uses io.ReadAll to read contents of entire file.
-// This is similar to os.ReadFile but without the call to os.Stat, because
-// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads at most 1 MiB (1024*1024 bytes). For files larger than this, a scanner
-// should be used.
-func ReadFileNoStat(filename string) ([]byte, error) {
- const maxBufferSize = 1024 * 1024
-
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- reader := io.LimitReader(f, maxBufferSize)
- return io.ReadAll(reader)
-}
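
The reason for skipping os.Stat can be demonstrated directly; a sketch (Linux only) showing that many /proc files report a zero size despite having content:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	fi, err := os.Stat("/proc/loadavg")
	if err != nil {
		fmt.Println(err)
		return
	}

	f, err := os.Open("/proc/loadavg")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Same bounded read as ReadFileNoStat above.
	data, err := io.ReadAll(io.LimitReader(f, 1024*1024))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("stat size: %d, bytes actually read: %d\n", fi.Size(), len(data))
}
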
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
deleted file mode 100644
index 1ab875cee..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (linux || darwin) && !appengine
-// +build linux darwin
-// +build !appengine
-
-package util
-
-import (
- "bytes"
- "os"
- "syscall"
-)
-
-// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
-// https://github.com/prometheus/node_exporter/pull/728/files
-//
-// Note that this function will not read files larger than 128 bytes.
-func SysReadFile(file string) (string, error) {
- f, err := os.Open(file)
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- // On some machines, hwmon drivers are broken and return EAGAIN. This causes
- // Go's os.ReadFile implementation to poll forever.
- //
- // Since we either want to read data or bail immediately, do the simplest
- // possible read using syscall directly.
- const sysFileBufferSize = 128
- b := make([]byte, sysFileBufferSize)
- n, err := syscall.Read(int(f.Fd()), b)
- if err != nil {
- return "", err
- }
-
- return string(bytes.TrimSpace(b[:n])), nil
-}
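
A self-contained sketch of the same single-read pattern, inlined because the util package is internal; the hwmon path is a hypothetical example and varies per machine:

package main

import (
	"bytes"
	"fmt"
	"os"
	"syscall"
)

// sysReadFile mirrors SysReadFile above: one bounded syscall.Read, so a
// broken driver returning EAGAIN fails fast instead of making os.ReadFile
// poll forever.
func sysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}
	return string(bytes.TrimSpace(b[:n])), nil
}

func main() {
	// Hypothetical sensor path for illustration.
	v, err := sysReadFile("/sys/class/hwmon/hwmon0/temp1_input")
	fmt.Println(v, err)
}
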
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
deleted file mode 100644
index 1d86f5e63..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (linux && appengine) || (!linux && !darwin)
-// +build linux,appengine !linux,!darwin
-
-package util
-
-import (
- "fmt"
-)
-
-// SysReadFile is a stub for builds that do not support the read syscall,
-// for example Windows, or Linux on Google App Engine; it always returns an error.
-func SysReadFile(file string) (string, error) {
- return "", fmt.Errorf("not supported on this platform")
-}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
deleted file mode 100644
index fe2355d3c..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "strconv"
-)
-
-// TODO(mdlayher): util packages are an anti-pattern and this should be moved
-// somewhere else that is more focused in the future.
-
-// A ValueParser enables parsing a single string into a variety of data types
-// in a concise and safe way. The Err method must be invoked after invoking
-// any other methods to ensure a value was successfully parsed.
-type ValueParser struct {
- v string
- err error
-}
-
-// NewValueParser creates a ValueParser using the input string.
-func NewValueParser(v string) *ValueParser {
- return &ValueParser{v: v}
-}
-
-// Int interprets the underlying value as an int and returns that value.
-func (vp *ValueParser) Int() int { return int(vp.int64()) }
-
-// PInt64 interprets the underlying value as an int64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PInt64() *int64 {
- if vp.err != nil {
- return nil
- }
-
- v := vp.int64()
- return &v
-}
-
-// int64 interprets the underlying value as an int64 and returns that value.
-// TODO: export if/when necessary.
-func (vp *ValueParser) int64() int64 {
- if vp.err != nil {
- return 0
- }
-
- // A base value of zero makes ParseInt infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseInt(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return 0
- }
-
- return v
-}
-
-// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PUInt64() *uint64 {
- if vp.err != nil {
- return nil
- }
-
- // A base value of zero makes ParseUint infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseUint(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return nil
- }
-
- return &v
-}
-
-// Err returns the last error, if any, encountered by the ValueParser.
-func (vp *ValueParser) Err() error {
- return vp.err
-}
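
A sketch of the intended call pattern, parse first and check Err once at the end, with the error-sticky logic inlined (internal package, not importable):

package main

import (
	"fmt"
	"strconv"
)

// valueParser inlines the error-sticky pattern above: once err is set,
// further parse calls are no-ops.
type valueParser struct {
	v   string
	err error
}

func (vp *valueParser) int64() int64 {
	if vp.err != nil {
		return 0
	}
	// Base 0: inferred from the string's prefix (0x -> hex, 0 -> octal).
	v, err := strconv.ParseInt(vp.v, 0, 64)
	if err != nil {
		vp.err = err
		return 0
	}
	return v
}

func main() {
	vp := &valueParser{v: "0x10"}
	n := vp.int64()
	if vp.err != nil {
		fmt.Println("parse error:", vp.err)
		return
	}
	fmt.Println(n) // 16
}
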
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
deleted file mode 100644
index bc3a20c93..000000000
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "net"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
-type IPVSStats struct {
- // Total count of connections.
- Connections uint64
- // Total incoming packets processed.
- IncomingPackets uint64
- // Total outgoing packets processed.
- OutgoingPackets uint64
- // Total incoming traffic.
- IncomingBytes uint64
- // Total outgoing traffic.
- OutgoingBytes uint64
-}
-
-// IPVSBackendStatus holds current metrics of one virtual / real address pair.
-type IPVSBackendStatus struct {
- // The local (virtual) IP address.
- LocalAddress net.IP
- // The remote (real) IP address.
- RemoteAddress net.IP
- // The local (virtual) port.
- LocalPort uint16
- // The remote (real) port.
- RemotePort uint16
- // The local firewall mark
- LocalMark string
- // The transport protocol (TCP, UDP).
- Proto string
- // The current number of active connections for this virtual/real address pair.
- ActiveConn uint64
- // The current number of inactive connections for this virtual/real address pair.
- InactConn uint64
- // The current weight of this virtual/real address pair.
- Weight uint64
-}
-
-// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) IPVSStats() (IPVSStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
- if err != nil {
- return IPVSStats{}, err
- }
-
- return parseIPVSStats(bytes.NewReader(data))
-}
-
-// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
-func parseIPVSStats(r io.Reader) (IPVSStats, error) {
- var (
- statContent []byte
- statLines []string
- statFields []string
- stats IPVSStats
- )
-
- statContent, err := io.ReadAll(r)
- if err != nil {
- return IPVSStats{}, err
- }
-
- statLines = strings.SplitN(string(statContent), "\n", 4)
- if len(statLines) != 4 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
- }
-
- statFields = strings.Fields(statLines[2])
- if len(statFields) != 5 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
- }
-
- stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
-
- return stats, nil
-}
-
-// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
- file, err := os.Open(fs.proc.Path("net/ip_vs"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return parseIPVSBackendStatus(file)
-}
-
-func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
- var (
- status []IPVSBackendStatus
- scanner = bufio.NewScanner(file)
- proto string
- localMark string
- localAddress net.IP
- localPort uint16
- err error
- )
-
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- if len(fields) == 0 {
- continue
- }
- switch {
- case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
- continue
- case fields[0] == "TCP" || fields[0] == "UDP":
- if len(fields) < 2 {
- continue
- }
- proto = fields[0]
- localMark = ""
- localAddress, localPort, err = parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- case fields[0] == "FWM":
- if len(fields) < 2 {
- continue
- }
- proto = fields[0]
- localMark = fields[1]
- localAddress = nil
- localPort = 0
- case fields[0] == "->":
- if len(fields) < 6 {
- continue
- }
- remoteAddress, remotePort, err := parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- weight, err := strconv.ParseUint(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- activeConn, err := strconv.ParseUint(fields[4], 10, 64)
- if err != nil {
- return nil, err
- }
- inactConn, err := strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- status = append(status, IPVSBackendStatus{
- LocalAddress: localAddress,
- LocalPort: localPort,
- LocalMark: localMark,
- RemoteAddress: remoteAddress,
- RemotePort: remotePort,
- Proto: proto,
- Weight: weight,
- ActiveConn: activeConn,
- InactConn: inactConn,
- })
- }
- }
- return status, nil
-}
-
-func parseIPPort(s string) (net.IP, uint16, error) {
- var (
- ip net.IP
- err error
- )
-
- switch len(s) {
- case 13:
- ip, err = hex.DecodeString(s[0:8])
- if err != nil {
- return nil, 0, err
- }
- case 46:
- ip = net.ParseIP(s[1:40])
- if ip == nil {
- return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
- }
- default:
- return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
- }
-
- portString := s[len(s)-4:]
- if len(portString) != 4 {
- return nil, 0,
- fmt.Errorf("%w: Unexpected port string format %s", ErrFileParse, portString)
- }
- port, err := strconv.ParseUint(portString, 16, 16)
- if err != nil {
- return nil, 0, err
- }
-
- return ip, uint16(port), nil
-}
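
The two encodings accepted by parseIPPort come from /proc/net/ip_vs: a 13-character form (8 hex digits of IPv4, ':', 4 hex digits of port) and a 46-character bracketed IPv6 form. A sketch decoding the IPv4 form the same way:

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	// 13 characters: "C0A80001" is 192.168.0.1, "0050" is port 80 in hex.
	s := "C0A80001:0050"

	ipBytes, err := hex.DecodeString(s[0:8])
	if err != nil {
		fmt.Println(err)
		return
	}
	port, err := strconv.ParseUint(s[len(s)-4:], 16, 16)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(net.IP(ipBytes), port) // 192.168.0.1 80
}
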
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
deleted file mode 100644
index db88566bd..000000000
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "os"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// KernelRandom contains information about the kernel's random number generator.
-type KernelRandom struct {
- // EntropyAvaliable gives the available entropy, in bits.
- EntropyAvaliable *uint64
- // PoolSize gives the size of the entropy pool, in bits.
- PoolSize *uint64
- // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
- URandomMinReseedSeconds *uint64
- // WriteWakeupThreshold is the number of bits of entropy below which we wake up processes
- // that do a select(2) or poll(2) for write access to /dev/random.
- WriteWakeupThreshold *uint64
- // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
- // waiting for entropy from /dev/random.
- ReadWakeupThreshold *uint64
-}
-
-// KernelRandom returns values from /proc/sys/kernel/random.
-func (fs FS) KernelRandom() (KernelRandom, error) {
- random := KernelRandom{}
-
- for file, p := range map[string]**uint64{
- "entropy_avail": &random.EntropyAvaliable,
- "poolsize": &random.PoolSize,
- "urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
- "write_wakeup_threshold": &random.WriteWakeupThreshold,
- "read_wakeup_threshold": &random.ReadWakeupThreshold,
- } {
- val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
- if os.IsNotExist(err) {
- continue
- }
- if err != nil {
- return random, err
- }
- *p = &val
- }
-
- return random, nil
-}
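
A usage sketch; absent files leave the corresponding pointer nil, so each field needs a nil check (EntropyAvaliable, as spelled, is the actual exported field name):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	r, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}

	if r.EntropyAvaliable != nil {
		fmt.Println("entropy available (bits):", *r.EntropyAvaliable)
	}
	if r.PoolSize != nil {
		fmt.Println("pool size (bits):", *r.PoolSize)
	}
}
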
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
deleted file mode 100644
index 332e76c17..000000000
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// LoadAvg represents an entry in /proc/loadavg.
-type LoadAvg struct {
- Load1 float64
- Load5 float64
- Load15 float64
-}
-
-// LoadAvg returns loadavg from /proc.
-func (fs FS) LoadAvg() (*LoadAvg, error) {
- path := fs.proc.Path("loadavg")
-
- data, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, err
- }
- return parseLoad(data)
-}
-
-// Parse /proc loadavg and return 1m, 5m and 15m.
-func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
- loads := make([]float64, 3)
- parts := strings.Fields(string(loadavgBytes))
- if len(parts) < 3 {
- return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
- }
-
- var err error
- for i, load := range parts[0:3] {
- loads[i], err = strconv.ParseFloat(load, 64)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
- }
- }
- return &LoadAvg{
- Load1: loads[0],
- Load5: loads[1],
- Load15: loads[2],
- }, nil
-}
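
A minimal usage sketch for the accessor above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	la, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load: 1m=%.2f 5m=%.2f 15m=%.2f\n", la.Load1, la.Load5, la.Load15)
}
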
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
deleted file mode 100644
index 67a9d2b44..000000000
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "os"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
- recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
- recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
- recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
- recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
- componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
-)
-
-// MDStat holds info parsed from /proc/mdstat.
-type MDStat struct {
- // Name of the device.
- Name string
- // Activity state of the device.
- ActivityState string
- // Number of active disks.
- DisksActive int64
- // Total number of disks the device requires.
- DisksTotal int64
- // Number of failed disks.
- DisksFailed int64
- // Number of "down" disks. (the _ indicator in the status line)
- DisksDown int64
- // Spare disks in the device.
- DisksSpare int64
- // Number of blocks the device holds.
- BlocksTotal int64
- // Number of blocks on the device that are in sync.
- BlocksSynced int64
- // Number of blocks on the device that need to be synced.
- BlocksToBeSynced int64
- // Progress percentage of the current sync.
- BlocksSyncedPct float64
- // Estimated finish time for the current sync (in minutes).
- BlocksSyncedFinishTime float64
- // Current sync speed (in kilobytes/sec).
- BlocksSyncedSpeed float64
- // Name of md component devices
- Devices []string
-}
-
-// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://raid.wiki.kernel.org/index.php/Mdstat
-func (fs FS) MDStat() ([]MDStat, error) {
- data, err := os.ReadFile(fs.proc.Path("mdstat"))
- if err != nil {
- return nil, err
- }
- mdstat, err := parseMDStat(data)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
- }
- return mdstat, nil
-}
-
-// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
-// structs containing the relevant info.
-func parseMDStat(mdStatData []byte) ([]MDStat, error) {
- mdStats := []MDStat{}
- lines := strings.Split(string(mdStatData), "\n")
-
- for i, line := range lines {
- if strings.TrimSpace(line) == "" || line[0] == ' ' ||
- strings.HasPrefix(line, "Personalities") ||
- strings.HasPrefix(line, "unused") {
- continue
- }
-
- deviceFields := strings.Fields(line)
- if len(deviceFields) < 3 {
- return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
- }
- mdName := deviceFields[0] // mdx
- state := deviceFields[2] // active or inactive
-
- if len(lines) <= i+3 {
- return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
- }
-
- // Failed disks have the suffix (F) & Spare disks have the suffix (S).
- fail := int64(strings.Count(line, "(F)"))
- spare := int64(strings.Count(line, "(S)"))
- active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse md device lines for %q: %w", ErrFileParse, mdName, err)
- }
-
- syncLineIdx := i + 2
- if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
- syncLineIdx++
- }
-
- // If device is syncing at the moment, get the number of currently
- // synced bytes, otherwise that number equals the size of the device.
- blocksSynced := size
- blocksToBeSynced := size
- speed := float64(0)
- finish := float64(0)
- pct := float64(0)
- recovering := strings.Contains(lines[syncLineIdx], "recovery")
- resyncing := strings.Contains(lines[syncLineIdx], "resync")
- checking := strings.Contains(lines[syncLineIdx], "check")
-
- // Append recovery and resyncing state info.
- if recovering || resyncing || checking {
- if recovering {
- state = "recovering"
- } else if checking {
- state = "checking"
- } else {
- state = "resyncing"
- }
-
- // Handle case when resync=PENDING or resync=DELAYED.
- if strings.Contains(lines[syncLineIdx], "PENDING") ||
- strings.Contains(lines[syncLineIdx], "DELAYED") {
- blocksSynced = 0
- } else {
- blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
- }
- }
- }
-
- mdStats = append(mdStats, MDStat{
- Name: mdName,
- ActivityState: state,
- DisksActive: active,
- DisksFailed: fail,
- DisksDown: down,
- DisksSpare: spare,
- DisksTotal: total,
- BlocksTotal: size,
- BlocksSynced: blocksSynced,
- BlocksToBeSynced: blocksToBeSynced,
- BlocksSyncedPct: pct,
- BlocksSyncedFinishTime: finish,
- BlocksSyncedSpeed: speed,
- Devices: evalComponentDevices(deviceFields),
- })
- }
-
- return mdStats, nil
-}
-
-func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
- statusFields := strings.Fields(statusLine)
- if len(statusFields) < 1 {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
- }
-
- sizeStr := statusFields[0]
- size, err = strconv.ParseInt(sizeStr, 10, 64)
- if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
- }
-
- if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
- // In the deviceLine, only disks have a number associated with them in [].
- total = int64(strings.Count(deviceLine, "["))
- return total, total, 0, size, nil
- }
-
- if strings.Contains(deviceLine, "inactive") {
- return 0, 0, 0, size, nil
- }
-
- matches := statusLineRE.FindStringSubmatch(statusLine)
- if len(matches) != 5 {
- return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
- }
-
- total, err = strconv.ParseInt(matches[2], 10, 64)
- if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
- }
-
- active, err = strconv.ParseInt(matches[3], 10, 64)
- if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
- }
- down = int64(strings.Count(matches[4], "_"))
-
- return active, total, down, size, nil
-}
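A minimal in-package sketch of how evalStatusLine behaves on a typical raid1 entry; the sample lines and expected values are assumptions based on the common mdstat layout (the statusLineRE pattern itself is defined elsewhere in this package):

deviceLine := "md0 : active raid1 sdb1[1] sda1[0]"
statusLine := "523968 blocks super 1.2 [2/1] [U_]"
active, total, down, size, err := evalStatusLine(deviceLine, statusLine)
if err == nil {
	// Expected: total=2, active=1, down=1 (one "_" in [U_]), size=523968.
	fmt.Println(active, total, down, size)
}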
-
-func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
- matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
- }
-
- blocks := strings.Split(matches[1], "/")
- blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
- if err != nil {
- return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
- }
-
- blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
- if err != nil {
- return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[1], err)
- }
-
- // Get percentage complete
- matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
- }
- pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
- if err != nil {
- return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
- }
-
- // Get time expected left to complete
- matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
- }
- finish, err = strconv.ParseFloat(matches[1], 64)
- if err != nil {
- return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
- }
-
- // Get recovery speed
- matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
- }
- speed, err = strconv.ParseFloat(matches[1], 64)
- if err != nil {
- return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
- }
-
- return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
-}
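Similarly, a hedged in-package sketch for a resync-in-progress line; the sample values are illustrative and assume the usual mdstat progress format:

line := "[==>..................] resync = 13.6% (2875392/21100544) finish=8.6min speed=173108K/sec"
synced, toSync, pct, finish, speed, err := evalRecoveryLine(line)
if err == nil {
	// Expected: synced=2875392, toSync=21100544, pct=13.6,
	// finish=8.6 (minutes), speed=173108 (KiB/s).
	fmt.Println(synced, toSync, pct, finish, speed)
}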
-
-func evalComponentDevices(deviceFields []string) []string {
- mdComponentDevices := make([]string, 0)
- if len(deviceFields) > 3 {
- for _, field := range deviceFields[4:] {
- match := componentDeviceRE.FindStringSubmatch(field)
- if match == nil {
- continue
- }
- mdComponentDevices = append(mdComponentDevices, match[1])
- }
- }
-
- return mdComponentDevices
-}
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
deleted file mode 100644
index 4b2c4050a..000000000
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Meminfo represents memory statistics.
-type Meminfo struct {
- // Total usable ram (i.e. physical ram minus a few reserved
- // bits and the kernel binary code)
- MemTotal *uint64
- // The sum of LowFree+HighFree
- MemFree *uint64
- // An estimate of how much memory is available for starting
- // new applications, without swapping. Calculated from
- // MemFree, SReclaimable, the size of the file LRU lists, and
- // the low watermarks in each zone. The estimate takes into
- // account that the system needs some page cache to function
- // well, and that not all reclaimable slab will be
- // reclaimable, due to items being in use. The impact of those
- // factors will vary from system to system.
- MemAvailable *uint64
- // Relatively temporary storage for raw disk blocks shouldn't
- // get tremendously large (20MB or so)
- Buffers *uint64
- Cached *uint64
- // Memory that once was swapped out, is swapped back in but
- // still also is in the swapfile (if memory is needed it
- // doesn't need to be swapped out AGAIN because it is already
- // in the swapfile. This saves I/O)
- SwapCached *uint64
- // Memory that has been used more recently and usually not
- // reclaimed unless absolutely necessary.
- Active *uint64
- // Memory which has been less recently used. It is more
- // eligible to be reclaimed for other purposes
- Inactive *uint64
- ActiveAnon *uint64
- InactiveAnon *uint64
- ActiveFile *uint64
- InactiveFile *uint64
- Unevictable *uint64
- Mlocked *uint64
- // total amount of swap space available
- SwapTotal *uint64
- // Memory which has been evicted from RAM, and is temporarily
- // on the disk
- SwapFree *uint64
- // Memory which is waiting to get written back to the disk
- Dirty *uint64
- // Memory which is actively being written back to the disk
- Writeback *uint64
- // Non-file backed pages mapped into userspace page tables
- AnonPages *uint64
- // files which have been mapped, such as libraries
- Mapped *uint64
- Shmem *uint64
- // in-kernel data structures cache
- Slab *uint64
- // Part of Slab, that might be reclaimed, such as caches
- SReclaimable *uint64
- // Part of Slab, that cannot be reclaimed on memory pressure
- SUnreclaim *uint64
- KernelStack *uint64
- // amount of memory dedicated to the lowest level of page
- // tables.
- PageTables *uint64
- // NFS pages sent to the server, but not yet committed to
- // stable storage
- NFSUnstable *uint64
- // Memory used for block device "bounce buffers"
- Bounce *uint64
- // Memory used by FUSE for temporary writeback buffers
- WritebackTmp *uint64
- // Based on the overcommit ratio ('vm.overcommit_ratio'),
- // this is the total amount of memory currently available to
- // be allocated on the system. This limit is only adhered to
- // if strict overcommit accounting is enabled (mode 2 in
- // 'vm.overcommit_memory').
- // The CommitLimit is calculated with the following formula:
- // CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
- // overcommit_ratio / 100 + [total swap pages]
- // For example, on a system with 1G of physical RAM and 7G
- // of swap with a `vm.overcommit_ratio` of 30 it would
- // yield a CommitLimit of 7.3G.
- // For more details, see the memory overcommit documentation
- // in vm/overcommit-accounting.
- CommitLimit *uint64
- // The amount of memory presently allocated on the system.
- // The committed memory is a sum of all of the memory which
- // has been allocated by processes, even if it has not been
- // "used" by them as of yet. A process which malloc()'s 1G
- // of memory, but only touches 300M of it will show up as
- // using 1G. This 1G is memory which has been "committed" to
- // by the VM and can be used at any time by the allocating
- // application. With strict overcommit enabled on the system
- // (mode 2 in 'vm.overcommit_memory'), allocations which would
- // exceed the CommitLimit (detailed above) will not be permitted.
- // This is useful if one needs to guarantee that processes will
- // not fail due to lack of memory once that memory has been
- // successfully allocated.
- CommittedAS *uint64
- // total size of vmalloc memory area
- VmallocTotal *uint64
- // amount of vmalloc area which is used
- VmallocUsed *uint64
- // largest contiguous block of vmalloc area which is free
- VmallocChunk *uint64
- Percpu *uint64
- HardwareCorrupted *uint64
- AnonHugePages *uint64
- ShmemHugePages *uint64
- ShmemPmdMapped *uint64
- CmaTotal *uint64
- CmaFree *uint64
- HugePagesTotal *uint64
- HugePagesFree *uint64
- HugePagesRsvd *uint64
- HugePagesSurp *uint64
- Hugepagesize *uint64
- DirectMap4k *uint64
- DirectMap2M *uint64
- DirectMap1G *uint64
-
- // The struct fields below are the byte-normalized counterparts to the
- // existing struct fields. Values are normalized using the optional
- // unit field in the meminfo line.
- MemTotalBytes *uint64
- MemFreeBytes *uint64
- MemAvailableBytes *uint64
- BuffersBytes *uint64
- CachedBytes *uint64
- SwapCachedBytes *uint64
- ActiveBytes *uint64
- InactiveBytes *uint64
- ActiveAnonBytes *uint64
- InactiveAnonBytes *uint64
- ActiveFileBytes *uint64
- InactiveFileBytes *uint64
- UnevictableBytes *uint64
- MlockedBytes *uint64
- SwapTotalBytes *uint64
- SwapFreeBytes *uint64
- DirtyBytes *uint64
- WritebackBytes *uint64
- AnonPagesBytes *uint64
- MappedBytes *uint64
- ShmemBytes *uint64
- SlabBytes *uint64
- SReclaimableBytes *uint64
- SUnreclaimBytes *uint64
- KernelStackBytes *uint64
- PageTablesBytes *uint64
- NFSUnstableBytes *uint64
- BounceBytes *uint64
- WritebackTmpBytes *uint64
- CommitLimitBytes *uint64
- CommittedASBytes *uint64
- VmallocTotalBytes *uint64
- VmallocUsedBytes *uint64
- VmallocChunkBytes *uint64
- PercpuBytes *uint64
- HardwareCorruptedBytes *uint64
- AnonHugePagesBytes *uint64
- ShmemHugePagesBytes *uint64
- ShmemPmdMappedBytes *uint64
- CmaTotalBytes *uint64
- CmaFreeBytes *uint64
- HugepagesizeBytes *uint64
- DirectMap4kBytes *uint64
- DirectMap2MBytes *uint64
- DirectMap1GBytes *uint64
-}
-
-// Meminfo returns information about the current kernel/system memory statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) Meminfo() (Meminfo, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
- if err != nil {
- return Meminfo{}, err
- }
-
- m, err := parseMemInfo(bytes.NewReader(b))
- if err != nil {
- return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
- }
-
- return *m, nil
-}
-
-func parseMemInfo(r io.Reader) (*Meminfo, error) {
- var m Meminfo
- s := bufio.NewScanner(r)
- for s.Scan() {
- fields := strings.Fields(s.Text())
- var val, valBytes uint64
-
- val, err := strconv.ParseUint(fields[1], 0, 64)
- if err != nil {
- return nil, err
- }
-
- switch len(fields) {
- case 2:
- // No unit present, use the parsed value as bytes directly.
- valBytes = val
- case 3:
- // Unit present in optional 3rd field, convert it to
- // bytes. The only unit supported within the Linux
- // kernel is `kB`.
- if fields[2] != "kB" {
- return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
- }
-
- valBytes = 1024 * val
-
- default:
- return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
- }
-
- switch fields[0] {
- case "MemTotal:":
- m.MemTotal = &val
- m.MemTotalBytes = &valBytes
- case "MemFree:":
- m.MemFree = &val
- m.MemFreeBytes = &valBytes
- case "MemAvailable:":
- m.MemAvailable = &val
- m.MemAvailableBytes = &valBytes
- case "Buffers:":
- m.Buffers = &val
- m.BuffersBytes = &valBytes
- case "Cached:":
- m.Cached = &val
- m.CachedBytes = &valBytes
- case "SwapCached:":
- m.SwapCached = &val
- m.SwapCachedBytes = &valBytes
- case "Active:":
- m.Active = &val
- m.ActiveBytes = &valBytes
- case "Inactive:":
- m.Inactive = &val
- m.InactiveBytes = &valBytes
- case "Active(anon):":
- m.ActiveAnon = &val
- m.ActiveAnonBytes = &valBytes
- case "Inactive(anon):":
- m.InactiveAnon = &val
- m.InactiveAnonBytes = &valBytes
- case "Active(file):":
- m.ActiveFile = &val
- m.ActiveFileBytes = &valBytes
- case "Inactive(file):":
- m.InactiveFile = &val
- m.InactiveFileBytes = &valBytes
- case "Unevictable:":
- m.Unevictable = &val
- m.UnevictableBytes = &valBytes
- case "Mlocked:":
- m.Mlocked = &val
- m.MlockedBytes = &valBytes
- case "SwapTotal:":
- m.SwapTotal = &val
- m.SwapTotalBytes = &valBytes
- case "SwapFree:":
- m.SwapFree = &val
- m.SwapFreeBytes = &valBytes
- case "Dirty:":
- m.Dirty = &val
- m.DirtyBytes = &valBytes
- case "Writeback:":
- m.Writeback = &val
- m.WritebackBytes = &valBytes
- case "AnonPages:":
- m.AnonPages = &val
- m.AnonPagesBytes = &valBytes
- case "Mapped:":
- m.Mapped = &val
- m.MappedBytes = &valBytes
- case "Shmem:":
- m.Shmem = &val
- m.ShmemBytes = &valBytes
- case "Slab:":
- m.Slab = &val
- m.SlabBytes = &valBytes
- case "SReclaimable:":
- m.SReclaimable = &val
- m.SReclaimableBytes = &valBytes
- case "SUnreclaim:":
- m.SUnreclaim = &val
- m.SUnreclaimBytes = &valBytes
- case "KernelStack:":
- m.KernelStack = &val
- m.KernelStackBytes = &valBytes
- case "PageTables:":
- m.PageTables = &val
- m.PageTablesBytes = &valBytes
- case "NFS_Unstable:":
- m.NFSUnstable = &val
- m.NFSUnstableBytes = &valBytes
- case "Bounce:":
- m.Bounce = &val
- m.BounceBytes = &valBytes
- case "WritebackTmp:":
- m.WritebackTmp = &val
- m.WritebackTmpBytes = &valBytes
- case "CommitLimit:":
- m.CommitLimit = &val
- m.CommitLimitBytes = &valBytes
- case "Committed_AS:":
- m.CommittedAS = &val
- m.CommittedASBytes = &valBytes
- case "VmallocTotal:":
- m.VmallocTotal = &val
- m.VmallocTotalBytes = &valBytes
- case "VmallocUsed:":
- m.VmallocUsed = &val
- m.VmallocUsedBytes = &valBytes
- case "VmallocChunk:":
- m.VmallocChunk = &val
- m.VmallocChunkBytes = &valBytes
- case "Percpu:":
- m.Percpu = &val
- m.PercpuBytes = &valBytes
- case "HardwareCorrupted:":
- m.HardwareCorrupted = &val
- m.HardwareCorruptedBytes = &valBytes
- case "AnonHugePages:":
- m.AnonHugePages = &val
- m.AnonHugePagesBytes = &valBytes
- case "ShmemHugePages:":
- m.ShmemHugePages = &val
- m.ShmemHugePagesBytes = &valBytes
- case "ShmemPmdMapped:":
- m.ShmemPmdMapped = &val
- m.ShmemPmdMappedBytes = &valBytes
- case "CmaTotal:":
- m.CmaTotal = &val
- m.CmaTotalBytes = &valBytes
- case "CmaFree:":
- m.CmaFree = &val
- m.CmaFreeBytes = &valBytes
- case "HugePages_Total:":
- m.HugePagesTotal = &val
- case "HugePages_Free:":
- m.HugePagesFree = &val
- case "HugePages_Rsvd:":
- m.HugePagesRsvd = &val
- case "HugePages_Surp:":
- m.HugePagesSurp = &val
- case "Hugepagesize:":
- m.Hugepagesize = &val
- m.HugepagesizeBytes = &valBytes
- case "DirectMap4k:":
- m.DirectMap4k = &val
- m.DirectMap4kBytes = &valBytes
- case "DirectMap2M:":
- m.DirectMap2M = &val
- m.DirectMap2MBytes = &valBytes
- case "DirectMap1G:":
- m.DirectMap1G = &val
- m.DirectMap1GBytes = &valBytes
- }
- }
-
- return &m, nil
-}
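A minimal usage sketch of the exported entry point, assuming a standard /proc mount; fields are nil pointers when the kernel does not report them, and the *Bytes counterparts hold the kB values normalized to bytes:

fs, err := procfs.NewFS("/proc")
if err != nil {
	log.Fatal(err)
}
mi, err := fs.Meminfo()
if err != nil {
	log.Fatal(err)
}
if mi.MemTotal != nil && mi.MemTotalBytes != nil {
	// MemTotal holds the raw kB value; MemTotalBytes is 1024*MemTotal.
	fmt.Println(*mi.MemTotal, *mi.MemTotalBytes)
}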
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
deleted file mode 100644
index a704c5e73..000000000
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A MountInfo is a type that describes the details and options
-// for each mount, parsed from /proc/self/mountinfo.
-// The fields of each entry of /proc/self/mountinfo
-// are described in the following man page:
-// http://man7.org/linux/man-pages/man5/proc.5.html
-type MountInfo struct {
- // Unique ID for the mount
- MountID int
- // The ID of the parent mount
- ParentID int
- // The value of `st_dev` for the files on this FS
- MajorMinorVer string
- // The pathname of the directory in the FS that forms
- // the root for this mount
- Root string
- // The pathname of the mount point relative to the root
- MountPoint string
- // Mount options
- Options map[string]string
- // Zero or more optional fields
- OptionalFields map[string]string
- // The Filesystem type
- FSType string
- // FS specific information or "none"
- Source string
- // Superblock options
- SuperOptions map[string]string
-}
-
-// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
-func parseMountInfo(info []byte) ([]*MountInfo, error) {
- mounts := []*MountInfo{}
- scanner := bufio.NewScanner(bytes.NewReader(info))
- for scanner.Scan() {
- mountString := scanner.Text()
- parsedMounts, err := parseMountInfoString(mountString)
- if err != nil {
- return nil, err
- }
- mounts = append(mounts, parsedMounts)
- }
-
- err := scanner.Err()
- return mounts, err
-}
-
-// Parses a mountinfo file line and converts it to a MountInfo struct.
-// An important check here is for the hyphen separator; if it does not
-// exist, the line is malformed.
-func parseMountInfoString(mountString string) (*MountInfo, error) {
- var err error
-
- mountInfo := strings.Split(mountString, " ")
- mountInfoLength := len(mountInfo)
- if mountInfoLength < 10 {
- return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
- }
-
- if mountInfo[mountInfoLength-4] != "-" {
- return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
- }
-
- mount := &MountInfo{
- MajorMinorVer: mountInfo[2],
- Root: mountInfo[3],
- MountPoint: mountInfo[4],
- Options: mountOptionsParser(mountInfo[5]),
- OptionalFields: nil,
- FSType: mountInfo[mountInfoLength-3],
- Source: mountInfo[mountInfoLength-2],
- SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
- }
-
- mount.MountID, err = strconv.Atoi(mountInfo[0])
- if err != nil {
- return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
- }
- mount.ParentID, err = strconv.Atoi(mountInfo[1])
- if err != nil {
- return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
- }
- // Has optional fields, which is a space separated list of values.
- // Example: shared:2 master:7
- if mountInfo[6] != "" {
- mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
- if err != nil {
- return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
- }
- }
- return mount, nil
-}
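As a sketch, feeding parseMountInfoString the example line from the proc(5) man page cited above yields the following (an in-package call, since the function is unexported):

m, err := parseMountInfoString("36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue")
if err == nil {
	// m.MountID=36, m.ParentID=35, m.MajorMinorVer="98:0",
	// m.Root="/mnt1", m.MountPoint="/mnt2", m.FSType="ext3",
	// m.Source="/dev/root", m.OptionalFields["master"]="1",
	// m.SuperOptions["errors"]="continue".
	fmt.Println(m.MountID, m.FSType)
}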
-
-// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
-func mountOptionsIsValidField(s string) bool {
- switch s {
- case
- "shared",
- "master",
- "propagate_from",
- "unbindable":
- return true
- }
- return false
-}
-
-// mountOptionsParseOptionalFields parses a list of optional field strings into a map of strings.
-func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
- optionalFields := make(map[string]string)
- for _, field := range o {
- optionSplit := strings.SplitN(field, ":", 2)
- value := ""
- if len(optionSplit) == 2 {
- value = optionSplit[1]
- }
- if mountOptionsIsValidField(optionSplit[0]) {
- optionalFields[optionSplit[0]] = value
- }
- }
- return optionalFields, nil
-}
-
-// mountOptionsParser parses mount options and superblock options into a map.
-func mountOptionsParser(mountOptions string) map[string]string {
- opts := make(map[string]string)
- options := strings.Split(mountOptions, ",")
- for _, opt := range options {
- splitOption := strings.Split(opt, "=")
- if len(splitOption) < 2 {
- key := splitOption[0]
- opts[key] = ""
- } else {
- key, value := splitOption[0], splitOption[1]
- opts[key] = value
- }
- }
- return opts
-}
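A quick sketch of the resulting map shape (valueless options map to the empty string):

opts := mountOptionsParser("rw,relatime,errors=continue")
// opts == map[string]string{"rw": "", "relatime": "", "errors": "continue"}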
-
-// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
-func GetMounts() ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat("/proc/self/mountinfo")
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
-
-// GetProcMounts retrieves mountinfo information from a process's `/proc/[pid]/mountinfo`.
-func GetProcMounts(pid int) ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
deleted file mode 100644
index 75a3b6c81..000000000
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ /dev/null
@@ -1,707 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-// While implementing parsing of /proc/[pid]/mountstats, this blog was used
-// heavily as a reference:
-// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
-//
-// Special thanks to Chris Siebenmann for all of his posts explaining the
-// various statistics available for NFS.
-
-import (
- "bufio"
- "fmt"
- "io"
- "strconv"
- "strings"
- "time"
-)
-
-// Constants shared between multiple functions.
-const (
- deviceEntryLen = 8
-
- fieldBytesLen = 8
- fieldEventsLen = 27
-
- statVersion10 = "1.0"
- statVersion11 = "1.1"
-
- fieldTransport10TCPLen = 10
- fieldTransport10UDPLen = 7
-
- fieldTransport11TCPLen = 13
- fieldTransport11UDPLen = 10
-
- // kernel version >= 4.14 MaxLen
- // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
- fieldTransport11RDMAMaxLen = 28
-
- // kernel version <= 4.2 MinLen
- // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
- fieldTransport11RDMAMinLen = 20
-)
-
-// A Mount is a device mount parsed from /proc/[pid]/mountstats.
-type Mount struct {
- // Name of the device.
- Device string
- // The mount point of the device.
- Mount string
- // The filesystem type used by the device.
- Type string
- // If available, additional statistics related to this Mount.
- // Use a type assertion to determine if additional statistics are available.
- Stats MountStats
-}
-
-// A MountStats is a type which contains detailed statistics for a specific
-// type of Mount.
-type MountStats interface {
- mountStats()
-}
-
-// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
-type MountStatsNFS struct {
- // The version of statistics provided.
- StatVersion string
- // The mount options of the NFS mount.
- Opts map[string]string
- // The age of the NFS mount.
- Age time.Duration
- // Statistics related to byte counters for various operations.
- Bytes NFSBytesStats
- // Statistics related to various NFS event occurrences.
- Events NFSEventsStats
- // Statistics broken down by filesystem operation.
- Operations []NFSOperationStats
- // Statistics about the NFS RPC transport.
- Transport []NFSTransportStats
-}
-
-// mountStats implements MountStats.
-func (m MountStatsNFS) mountStats() {}
-
-// A NFSBytesStats contains statistics about the number of bytes read and written
-// by an NFS client to and from an NFS server.
-type NFSBytesStats struct {
- // Number of bytes read using the read() syscall.
- Read uint64
- // Number of bytes written using the write() syscall.
- Write uint64
- // Number of bytes read using the read() syscall in O_DIRECT mode.
- DirectRead uint64
- // Number of bytes written using the write() syscall in O_DIRECT mode.
- DirectWrite uint64
- // Number of bytes read from the NFS server, in total.
- ReadTotal uint64
- // Number of bytes written to the NFS server, in total.
- WriteTotal uint64
- // Number of pages read directly via mmap()'d files.
- ReadPages uint64
- // Number of pages written directly via mmap()'d files.
- WritePages uint64
-}
-
-// A NFSEventsStats contains statistics about NFS event occurrences.
-type NFSEventsStats struct {
- // Number of times cached inode attributes are re-validated from the server.
- InodeRevalidate uint64
- // Number of times cached dentry nodes are re-validated from the server.
- DnodeRevalidate uint64
- // Number of times an inode cache is cleared.
- DataInvalidate uint64
- // Number of times cached inode attributes are invalidated.
- AttributeInvalidate uint64
- // Number of times files or directories have been open()'d.
- VFSOpen uint64
- // Number of times a directory lookup has occurred.
- VFSLookup uint64
- // Number of times permissions have been checked.
- VFSAccess uint64
- // Number of updates (and potential writes) to pages.
- VFSUpdatePage uint64
- // Number of pages read directly via mmap()'d files.
- VFSReadPage uint64
- // Number of times a group of pages have been read.
- VFSReadPages uint64
- // Number of pages written directly via mmap()'d files.
- VFSWritePage uint64
- // Number of times a group of pages have been written.
- VFSWritePages uint64
- // Number of times directory entries have been read with getdents().
- VFSGetdents uint64
- // Number of times attributes have been set on inodes.
- VFSSetattr uint64
- // Number of pending writes that have been forcefully flushed to the server.
- VFSFlush uint64
- // Number of times fsync() has been called on directories and files.
- VFSFsync uint64
- // Number of times locking has been attempted on a file.
- VFSLock uint64
- // Number of times files have been closed and released.
- VFSFileRelease uint64
- // Unknown. Possibly unused.
- CongestionWait uint64
- // Number of times files have been truncated.
- Truncation uint64
- // Number of times a file has been grown due to writes beyond its existing end.
- WriteExtension uint64
- // Number of times a file was removed while still open by another process.
- SillyRename uint64
- // Number of times the NFS server gave less data than expected while reading.
- ShortRead uint64
- // Number of times the NFS server wrote less data than expected while writing.
- ShortWrite uint64
- // Number of times the NFS server indicated EJUKEBOX; retrieving data from
- // offline storage.
- JukeboxDelay uint64
- // Number of NFS v4.1+ pNFS reads.
- PNFSRead uint64
- // Number of NFS v4.1+ pNFS writes.
- PNFSWrite uint64
-}
-
-// A NFSOperationStats contains statistics for a single operation.
-type NFSOperationStats struct {
- // The name of the operation.
- Operation string
- // Number of requests performed for this operation.
- Requests uint64
- // Number of times an actual RPC request has been transmitted for this operation.
- Transmissions uint64
- // Number of times a request has had a major timeout.
- MajorTimeouts uint64
- // Number of bytes sent for this operation, including RPC headers and payload.
- BytesSent uint64
- // Number of bytes received for this operation, including RPC headers and payload.
- BytesReceived uint64
- // Duration all requests spent queued for transmission before they were sent.
- CumulativeQueueMilliseconds uint64
- // Duration it took to get a reply back after the request was transmitted.
- CumulativeTotalResponseMilliseconds uint64
- // Duration from when a request was enqueued to when it was completely handled.
- CumulativeTotalRequestMilliseconds uint64
- // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
- Errors uint64
-}
-
-// A NFSTransportStats contains statistics for the NFS mount RPC requests and
-// responses.
-type NFSTransportStats struct {
- // The transport protocol used for the NFS mount.
- Protocol string
- // The local port used for the NFS mount.
- Port uint64
- // Number of times the client has had to establish a connection from scratch
- // to the NFS server.
- Bind uint64
- // Number of times the client has made a TCP connection to the NFS server.
- Connect uint64
- // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
- // spent waiting for connections to the server to be established.
- ConnectIdleTime uint64
- // Duration since the NFS mount last saw any RPC traffic.
- IdleTimeSeconds uint64
- // Number of RPC requests for this mount sent to the NFS server.
- Sends uint64
- // Number of RPC responses for this mount received from the NFS server.
- Receives uint64
- // Number of times the NFS server sent a response with a transaction ID
- // unknown to this client.
- BadTransactionIDs uint64
- // A running counter, incremented on each request as the current difference
- // between sends and receives.
- CumulativeActiveRequests uint64
- // A running counter, incremented on each request by the current backlog
- // queue size.
- CumulativeBacklog uint64
-
- // Stats below only available with stat version 1.1.
-
- // Maximum number of simultaneously active RPC requests ever used.
- MaximumRPCSlotsUsed uint64
- // A running counter, incremented on each request as the current size of the
- // sending queue.
- CumulativeSendingQueue uint64
- // A running counter, incremented on each request as the current size of the
- // pending queue.
- CumulativePendingQueue uint64
-
- // Stats below only available with stat version 1.1.
- // Transport over RDMA
-
- // accessed when sending a call
- ReadChunkCount uint64
- WriteChunkCount uint64
- ReplyChunkCount uint64
- TotalRdmaRequest uint64
-
- // rarely accessed error counters
- PullupCopyCount uint64
- HardwayRegisterCount uint64
- FailedMarshalCount uint64
- BadReplyCount uint64
- MrsRecovered uint64
- MrsOrphaned uint64
- MrsAllocated uint64
- EmptySendctxQ uint64
-
- // accessed when receiving a reply
- TotalRdmaReply uint64
- FixupCopyCount uint64
- ReplyWaitsForSend uint64
- LocalInvNeeded uint64
- NomsgCallCount uint64
- BcallCount uint64
-}
-
-// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
-// of Mount structures containing detailed information about each mount.
-// If available, statistics for each mount are parsed as well.
-func parseMountStats(r io.Reader) ([]*Mount, error) {
- const (
- device = "device"
- statVersionPrefix = "statvers="
-
- nfs3Type = "nfs"
- nfs4Type = "nfs4"
- )
-
- var mounts []*Mount
-
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Only look for device entries in this function
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 || ss[0] != device {
- continue
- }
-
- m, err := parseMount(ss)
- if err != nil {
- return nil, err
- }
-
- // Does this mount also possess statistics information?
- if len(ss) > deviceEntryLen {
- // Only NFSv3 and v4 are supported for parsing statistics
- if m.Type != nfs3Type && m.Type != nfs4Type {
- return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
- }
-
- statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
-
- stats, err := parseMountStatsNFS(s, statVersion)
- if err != nil {
- return nil, err
- }
-
- m.Stats = stats
- }
-
- mounts = append(mounts, m)
- }
-
- return mounts, s.Err()
-}
-
-// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-//
-// device [device] mounted on [mount] with fstype [type]
-func parseMount(ss []string) (*Mount, error) {
- if len(ss) < deviceEntryLen {
- return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
- }
-
- // Check for specific words appearing at specific indices to ensure
- // the format is consistent with what we expect
- format := []struct {
- i int
- s string
- }{
- {i: 0, s: "device"},
- {i: 2, s: "mounted"},
- {i: 3, s: "on"},
- {i: 5, s: "with"},
- {i: 6, s: "fstype"},
- }
-
- for _, f := range format {
- if ss[f.i] != f.s {
- return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
- }
- }
-
- return &Mount{
- Device: ss[1],
- Mount: ss[4],
- Type: ss[7],
- }, nil
-}
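A hedged sketch with a made-up NFS device entry (the hostname and paths are placeholders):

ss := strings.Fields("device fs.example.com:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1")
m, err := parseMount(ss)
if err == nil {
	// m.Device="fs.example.com:/export", m.Mount="/mnt/nfs", m.Type="nfs4".
	fmt.Println(m.Device, m.Mount, m.Type)
}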
-
-// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
-// related to NFS statistics.
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
- // Field indicators for parsing specific types of data
- const (
- fieldOpts = "opts:"
- fieldAge = "age:"
- fieldBytes = "bytes:"
- fieldEvents = "events:"
- fieldPerOpStats = "per-op"
- fieldTransport = "xprt:"
- )
-
- stats := &MountStatsNFS{
- StatVersion: statVersion,
- }
-
- for s.Scan() {
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 {
- break
- }
-
- switch ss[0] {
- case fieldOpts:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
- }
- if stats.Opts == nil {
- stats.Opts = map[string]string{}
- }
- for _, opt := range strings.Split(ss[1], ",") {
- split := strings.Split(opt, "=")
- if len(split) == 2 {
- stats.Opts[split[0]] = split[1]
- } else {
- stats.Opts[opt] = ""
- }
- }
- case fieldAge:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
- }
- // Age integer is in seconds
- d, err := time.ParseDuration(ss[1] + "s")
- if err != nil {
- return nil, err
- }
-
- stats.Age = d
- case fieldBytes:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
- }
- bstats, err := parseNFSBytesStats(ss[1:])
- if err != nil {
- return nil, err
- }
-
- stats.Bytes = *bstats
- case fieldEvents:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
- }
- estats, err := parseNFSEventsStats(ss[1:])
- if err != nil {
- return nil, err
- }
-
- stats.Events = *estats
- case fieldTransport:
- if len(ss) < 3 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
- }
-
- tstats, err := parseNFSTransportStats(ss[1:], statVersion)
- if err != nil {
- return nil, err
- }
-
- stats.Transport = append(stats.Transport, *tstats)
- }
-
- // When encountering "per-operation statistics", we must break this
- // loop and parse them separately to ensure we can terminate parsing
- // before reaching another device entry, which is why this 'if' statement
- // is not just another switch case
- if ss[0] == fieldPerOpStats {
- break
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- // NFS per-operation stats appear last before the next device entry
- perOpStats, err := parseNFSOperationStats(s)
- if err != nil {
- return nil, err
- }
-
- stats.Operations = perOpStats
-
- return stats, nil
-}
-
-// parseNFSBytesStats parses a NFSBytesStats line using an input set of
-// integer fields.
-func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
- if len(ss) != fieldBytesLen {
- return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
- }
-
- ns := make([]uint64, 0, fieldBytesLen)
- for _, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
-
- return &NFSBytesStats{
- Read: ns[0],
- Write: ns[1],
- DirectRead: ns[2],
- DirectWrite: ns[3],
- ReadTotal: ns[4],
- WriteTotal: ns[5],
- ReadPages: ns[6],
- WritePages: ns[7],
- }, nil
-}
-
-// parseNFSEventsStats parses a NFSEventsStats line using an input set of
-// integer fields.
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
- if len(ss) != fieldEventsLen {
- return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
- }
-
- ns := make([]uint64, 0, fieldEventsLen)
- for _, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
-
- return &NFSEventsStats{
- InodeRevalidate: ns[0],
- DnodeRevalidate: ns[1],
- DataInvalidate: ns[2],
- AttributeInvalidate: ns[3],
- VFSOpen: ns[4],
- VFSLookup: ns[5],
- VFSAccess: ns[6],
- VFSUpdatePage: ns[7],
- VFSReadPage: ns[8],
- VFSReadPages: ns[9],
- VFSWritePage: ns[10],
- VFSWritePages: ns[11],
- VFSGetdents: ns[12],
- VFSSetattr: ns[13],
- VFSFlush: ns[14],
- VFSFsync: ns[15],
- VFSLock: ns[16],
- VFSFileRelease: ns[17],
- CongestionWait: ns[18],
- Truncation: ns[19],
- WriteExtension: ns[20],
- SillyRename: ns[21],
- ShortRead: ns[22],
- ShortWrite: ns[23],
- JukeboxDelay: ns[24],
- PNFSRead: ns[25],
- PNFSWrite: ns[26],
- }, nil
-}
-
-// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
-// additional information about per-operation statistics until an empty
-// line is reached.
-func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
- const (
- // Minimum number of expected fields in each per-operation statistics set
- minFields = 9
- )
-
- var ops []NFSOperationStats
-
- for s.Scan() {
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 {
- // Must break when reading a blank line after per-operation stats to
- // enable top-level function to parse the next device entry
- break
- }
-
- if len(ss) < minFields {
- return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
- }
-
- // Skip string operation name for integers
- ns := make([]uint64, 0, minFields-1)
- for _, st := range ss[1:] {
- n, err := strconv.ParseUint(st, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
- opStats := NFSOperationStats{
- Operation: strings.TrimSuffix(ss[0], ":"),
- Requests: ns[0],
- Transmissions: ns[1],
- MajorTimeouts: ns[2],
- BytesSent: ns[3],
- BytesReceived: ns[4],
- CumulativeQueueMilliseconds: ns[5],
- CumulativeTotalResponseMilliseconds: ns[6],
- CumulativeTotalRequestMilliseconds: ns[7],
- }
-
- if len(ns) > 8 {
- opStats.Errors = ns[8]
- }
-
- ops = append(ops, opStats)
- }
-
- return ops, s.Err()
-}
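A hedged in-package sketch with an illustrative per-operation line (nine fields: the operation name plus eight counters; a tenth errors field appears on newer kernels); the numbers are made up:

s := bufio.NewScanner(strings.NewReader("READ: 1298 1298 0 207680 1210292 6 79386 79407\n\n"))
ops, err := parseNFSOperationStats(s)
if err == nil && len(ops) == 1 {
	// ops[0].Operation="READ", ops[0].Requests=1298, ops[0].BytesSent=207680.
	fmt.Println(ops[0].Operation, ops[0].Requests)
}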
-
-// parseNFSTransportStats parses a NFSTransportStats line using an input set of
-// integer fields matched to a specific stats version.
-func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
- // Extract the protocol field. It is the only string value in the line
- protocol := ss[0]
- ss = ss[1:]
-
- switch statVersion {
- case statVersion10:
- var expectedLength int
- if protocol == "tcp" {
- expectedLength = fieldTransport10TCPLen
- } else if protocol == "udp" {
- expectedLength = fieldTransport10UDPLen
- } else {
- return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
- }
- if len(ss) != expectedLength {
- return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
- }
- case statVersion11:
- var expectedLength int
- if protocol == "tcp" {
- expectedLength = fieldTransport11TCPLen
- } else if protocol == "udp" {
- expectedLength = fieldTransport11UDPLen
- } else if protocol == "rdma" {
- expectedLength = fieldTransport11RDMAMinLen
- } else {
- return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
- }
- if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
- (protocol == "rdma" && len(ss) < expectedLength) {
- return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
- }
- default:
- return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
- }
-
- // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
- // in a v1.0 response. Since the RDMA stat length is the biggest, we use
- // the RDMA max length here.
- //
- // Note: slice length must be set to length of v1.1 stats to avoid a panic when
- // only v1.0 stats are present.
- // See: https://github.com/prometheus/node_exporter/issues/571.
- //
- // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
- ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
- for i, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns[i] = n
- }
-
- // The fields differ depending on the transport protocol (TCP or UDP)
- // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
- //
- // For the udp RPC transport there is no connection count, connect idle time,
- // or idle time (fields #3, #4, and #5); all other fields are the same. So
- // we set them to 0 here.
- if protocol == "udp" {
- ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
- } else if protocol == "tcp" {
- ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
- } else if protocol == "rdma" {
- ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
- }
-
- return &NFSTransportStats{
- // NFS xprt over tcp or udp
- Protocol: protocol,
- Port: ns[0],
- Bind: ns[1],
- Connect: ns[2],
- ConnectIdleTime: ns[3],
- IdleTimeSeconds: ns[4],
- Sends: ns[5],
- Receives: ns[6],
- BadTransactionIDs: ns[7],
- CumulativeActiveRequests: ns[8],
- CumulativeBacklog: ns[9],
-
- // NFS xprt over tcp or udp
- // And statVersion 1.1
- MaximumRPCSlotsUsed: ns[10],
- CumulativeSendingQueue: ns[11],
- CumulativePendingQueue: ns[12],
-
- // NFS xprt over rdma
- // And stat Version 1.1
- ReadChunkCount: ns[13],
- WriteChunkCount: ns[14],
- ReplyChunkCount: ns[15],
- TotalRdmaRequest: ns[16],
- PullupCopyCount: ns[17],
- HardwayRegisterCount: ns[18],
- FailedMarshalCount: ns[19],
- BadReplyCount: ns[20],
- MrsRecovered: ns[21],
- MrsOrphaned: ns[22],
- MrsAllocated: ns[23],
- EmptySendctxQ: ns[24],
- TotalRdmaReply: ns[25],
- FixupCopyCount: ns[26],
- ReplyWaitsForSend: ns[27],
- LocalInvNeeded: ns[28],
- NomsgCallCount: ns[29],
- BcallCount: ns[30],
- }, nil
-}
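The UDP re-alignment above can be hard to visualize; this standalone sketch shows the splice inserting three zero fields (connect count, connect idle time, idle time) after port and bind so UDP values line up with the TCP field layout; the numbers are illustrative:

ns := []uint64{769, 1, 1478, 1478, 0, 0} // port, bind, sends, receives, ...
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
// ns is now [769 1 0 0 0 1478 1478 0 0].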
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
deleted file mode 100644
index 316df5fbb..000000000
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics for one CPU core.
-type ConntrackStatEntry struct {
- Entries uint64
- Searched uint64
- Found uint64
- New uint64
- Invalid uint64
- Ignore uint64
- Delete uint64
- DeleteList uint64
- Insert uint64
- InsertFailed uint64
- Drop uint64
- EarlyDrop uint64
- SearchRestart uint64
-}
-
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
-func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
- return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
-}
-
-// Parses a slice of ConntrackStatEntries from the given filepath.
-func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
- // This file is small and can be read with one syscall.
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- // Do not wrap this error so the caller can detect os.IsNotExist and
- // similar conditions.
- return nil, err
- }
-
- stat, err := parseConntrackStat(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
- }
-
- return stat, nil
-}
-
-// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
-func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
- var entries []ConntrackStatEntry
-
- scanner := bufio.NewScanner(r)
- scanner.Scan()
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- conntrackEntry, err := parseConntrackStatEntry(fields)
- if err != nil {
- return nil, err
- }
- entries = append(entries, *conntrackEntry)
- }
-
- return entries, nil
-}
-
-// Parses a ConntrackStatEntry from a given slice of fields.
-func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
- entries, err := util.ParseHexUint64s(fields)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
- }
- numEntries := len(entries)
- if numEntries < 16 || numEntries > 17 {
- return nil,
- fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
- }
-
- stats := &ConntrackStatEntry{
- Entries: *entries[0],
- Searched: *entries[1],
- Found: *entries[2],
- New: *entries[3],
- Invalid: *entries[4],
- Ignore: *entries[5],
- Delete: *entries[6],
- DeleteList: *entries[7],
- Insert: *entries[8],
- InsertFailed: *entries[9],
- Drop: *entries[10],
- EarlyDrop: *entries[11],
- }
-
- // Ignore missing search_restart on Linux < 2.6.35.
- if numEntries == 17 {
- stats.SearchRestart = *entries[16]
- }
-
- return stats, nil
-}
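A hedged in-package sketch; nf_conntrack values are hexadecimal, and the made-up line below carries the optional 17th search_restart field:

fields := strings.Fields("00000023 0000004d 00000011 0 0 0 0 0 0 0 0 0 0 0 0 0 00000001")
e, err := parseConntrackStatEntry(fields)
if err == nil {
	// e.Entries=0x23=35, e.Searched=0x4d=77, e.SearchRestart=1.
	fmt.Println(e.Entries, e.Searched, e.SearchRestart)
}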
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
deleted file mode 100644
index e66208aa0..000000000
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "os"
- "sort"
- "strconv"
- "strings"
-)
-
-// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
-type NetDevLine struct {
- Name string `json:"name"` // The name of the interface.
- RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
- RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
- RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
- RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
- RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
- RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
- RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
- RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
- TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
- TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
- TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
- TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
- TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
- TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
- TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
- TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
-}
-
-// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
-// are interface names.
-type NetDev map[string]NetDevLine
-
-// NetDev returns kernel/system statistics read from /proc/net/dev.
-func (fs FS) NetDev() (NetDev, error) {
- return newNetDev(fs.proc.Path("net/dev"))
-}
-
-// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
-func (p Proc) NetDev() (NetDev, error) {
- return newNetDev(p.path("net/dev"))
-}
-
-// newNetDev creates a new NetDev from the contents of the given file.
-func newNetDev(file string) (NetDev, error) {
- f, err := os.Open(file)
- if err != nil {
- return NetDev{}, err
- }
- defer f.Close()
-
- netDev := NetDev{}
- s := bufio.NewScanner(f)
- for n := 0; s.Scan(); n++ {
- // Skip the 2 header lines.
- if n < 2 {
- continue
- }
-
- line, err := netDev.parseLine(s.Text())
- if err != nil {
- return netDev, err
- }
-
- netDev[line.Name] = *line
- }
-
- return netDev, s.Err()
-}
-
-// parseLine parses a single line from the /proc/net/dev file. Header lines
-// must be filtered prior to calling this method.
-func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
- idx := strings.LastIndex(rawLine, ":")
- if idx == -1 {
- return nil, errors.New("invalid net/dev line, missing colon")
- }
- fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
-
- var err error
- line := &NetDevLine{}
-
- // Interface Name
- line.Name = strings.TrimSpace(rawLine[:idx])
- if line.Name == "" {
- return nil, errors.New("invalid net/dev line, empty interface name")
- }
-
- // RX
- line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
- if err != nil {
- return nil, err
- }
-
- // TX
- line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
- if err != nil {
- return nil, err
- }
-
- return line, nil
-}
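A hedged sketch of the line format: the interface name, a colon, then sixteen whitespace-separated counters (eight RX, then eight TX); the counters are made up:

nd := NetDev{}
line, err := nd.parseLine("  eth0: 1000 10 0 0 0 0 0 0 2000 20 0 0 0 0 0 0")
if err == nil {
	// line.Name="eth0", line.RxBytes=1000, line.TxBytes=2000.
	fmt.Println(line.Name, line.RxBytes, line.TxBytes)
}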
-
-// Total aggregates the values across interfaces and returns a new NetDevLine.
-// The Name field will be a sorted comma separated list of interface names.
-func (netDev NetDev) Total() NetDevLine {
- total := NetDevLine{}
-
- names := make([]string, 0, len(netDev))
- for _, ifc := range netDev {
- names = append(names, ifc.Name)
- total.RxBytes += ifc.RxBytes
- total.RxPackets += ifc.RxPackets
- total.RxErrors += ifc.RxErrors
- total.RxDropped += ifc.RxDropped
- total.RxFIFO += ifc.RxFIFO
- total.RxFrame += ifc.RxFrame
- total.RxCompressed += ifc.RxCompressed
- total.RxMulticast += ifc.RxMulticast
- total.TxBytes += ifc.TxBytes
- total.TxPackets += ifc.TxPackets
- total.TxErrors += ifc.TxErrors
- total.TxDropped += ifc.TxDropped
- total.TxFIFO += ifc.TxFIFO
- total.TxCollisions += ifc.TxCollisions
- total.TxCarrier += ifc.TxCarrier
- total.TxCompressed += ifc.TxCompressed
- }
- sort.Strings(names)
- total.Name = strings.Join(names, ", ")
-
- return total
-}
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
deleted file mode 100644
index b70f1fc7a..000000000
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "encoding/hex"
- "fmt"
- "io"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-const (
- // readLimit is used by io.LimitReader while reading the content of the
- // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
- // as each line represents a single used socket.
- // In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
- // With e.g. 150 bytes per line and the maximum number of 65535,
- // the reader needs to handle 150 bytes * 65535 =~ 10 MB for a single IP.
- readLimit = 4294967296 // Byte -> 4 GiB
-)
-
-// This contains generic data structures for both udp and tcp sockets.
-type (
- // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
- NetIPSocket []*netIPSocketLine
-
- // NetIPSocketSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetIPSocket it does not collect
- // the parsed lines into a slice.
- NetIPSocketSummary struct {
- // TxQueueLength shows the total queue length of all parsed tx_queue lengths.
- TxQueueLength uint64
- // RxQueueLength shows the total queue length of all parsed rx_queue lengths.
- RxQueueLength uint64
- // UsedSockets shows the total number of parsed lines representing the
- // number of used sockets.
- UsedSockets uint64
- // Drops shows the total number of dropped packets of all UDP sockets.
- Drops *uint64
- }
-
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
- // Drops is non-nil for udp{,6}, but nil for tcp{,6}.
- // For the proc file format details, see https://linux.die.net/man/5/proc.
- netIPSocketLine struct {
- Sl uint64
- LocalAddr net.IP
- LocalPort uint64
- RemAddr net.IP
- RemPort uint64
- St uint64
- TxQueue uint64
- RxQueue uint64
- UID uint64
- Inode uint64
- Drops *uint64
- }
-)
-
-func newNetIPSocket(file string) (NetIPSocket, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var netIPSocket NetIPSocket
- isUDP := strings.Contains(file, "udp")
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields, isUDP)
- if err != nil {
- return nil, err
- }
- netIPSocket = append(netIPSocket, line)
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return netIPSocket, nil
-}
-
-// newNetIPSocketSummary creates a new NetIPSocketSummary from the contents of the given file.
-func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var netIPSocketSummary NetIPSocketSummary
- var udpPacketDrops uint64
- isUDP := strings.Contains(file, "udp")
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields, isUDP)
- if err != nil {
- return nil, err
- }
- netIPSocketSummary.TxQueueLength += line.TxQueue
- netIPSocketSummary.RxQueueLength += line.RxQueue
- netIPSocketSummary.UsedSockets++
- if isUDP {
- udpPacketDrops += *line.Drops
- netIPSocketSummary.Drops = &udpPacketDrops
- }
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return &netIPSocketSummary, nil
-}
-
-// The /proc/net/{t,u}dp{,6} files use network byte order for IPv4. For IPv6, the address is four words of four bytes each, and within each of those words the bytes are written in reverse order.
-
-func parseIP(hexIP string) (net.IP, error) {
- var byteIP []byte
- byteIP, err := hex.DecodeString(hexIP)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
- }
- switch len(byteIP) {
- case 4:
- return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
- case 16:
- i := net.IP{
- byteIP[3], byteIP[2], byteIP[1], byteIP[0],
- byteIP[7], byteIP[6], byteIP[5], byteIP[4],
- byteIP[11], byteIP[10], byteIP[9], byteIP[8],
- byteIP[15], byteIP[14], byteIP[13], byteIP[12],
- }
- return i, nil
- default:
- return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
- }
-}
-
-// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
- line := &netIPSocketLine{}
- if len(fields) < 10 {
- return nil, fmt.Errorf(
- "%w: Less than 10 columns found %q",
- ErrFileParse,
- strings.Join(fields, " "),
- )
- }
- var err error // parse error
-
- // sl
- s := strings.Split(fields[0], ":")
- if len(s) != 2 {
- return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
- }
-
- if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
- }
- // local_address
- l := strings.Split(fields[1], ":")
- if len(l) != 2 {
- return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
- }
- if line.LocalAddr, err = parseIP(l[0]); err != nil {
- return nil, err
- }
- if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
- }
-
- // remote_address
- r := strings.Split(fields[2], ":")
- if len(r) != 2 {
- return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
- }
- if line.RemAddr, err = parseIP(r[0]); err != nil {
- return nil, err
- }
- if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
- }
-
- // st
- if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
- }
-
- // tx_queue and rx_queue
- q := strings.Split(fields[4], ":")
- if len(q) != 2 {
- return nil, fmt.Errorf(
- "%w: Missing colon for tx/rx queues in socket line %q",
- ErrFileParse,
- fields[4],
- )
- }
- if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
- }
- if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
- }
-
- // uid
- if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
- }
-
- // inode
- if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
- }
-
- // drops
- if isUDP {
- // UDP lines carry the drop count in a 13th column; guard against
- // short lines before indexing it.
- if len(fields) < 13 {
- return nil, fmt.Errorf("%w: Missing drops column in UDP socket line %q", ErrFileParse, strings.Join(fields, " "))
- }
- drops, err := strconv.ParseUint(fields[12], 0, 64)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, fields[12], err)
- }
- line.Drops = &drops
- }
-
- return line, nil
-}
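Aside: the byte-order handling in parseIP above is the subtle part of this file. Below is a minimal, self-contained sketch of the IPv4 branch (decodeHexIPv4 is a hypothetical helper, not part of the package) showing why 127.0.0.1 appears as 0100007F in these proc files:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

// decodeHexIPv4 mirrors the 4-byte branch of parseIP: the kernel writes the
// address in network byte order, so the bytes must be reversed to obtain the
// conventional representation.
func decodeHexIPv4(hexIP string) (net.IP, error) {
	b, err := hex.DecodeString(hexIP)
	if err != nil || len(b) != 4 {
		return nil, fmt.Errorf("not a hex IPv4 address: %q", hexIP)
	}
	return net.IP{b[3], b[2], b[1], b[0]}, nil
}

func main() {
	ip, err := decodeHexIPv4("0100007F")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 127.0.0.1
}
```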
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
deleted file mode 100644
index b6c77b709..000000000
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// NetProtocolStats stores the contents from /proc/net/protocols.
-type NetProtocolStats map[string]NetProtocolStatLine
-
-// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
-// only care about the first six columns as the rest are not likely to change
-// and only serve to provide a set of capabilities for each protocol.
-type NetProtocolStatLine struct {
- Name string // 0 The name of the protocol
- Size uint64 // 1 The size, in bytes, of a given protocol structure, e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
- Sockets int64 // 2 Number of sockets in use by this protocol
- Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
- Pressure int // 4 Whether the protocol is experiencing memory pressure: yes (1), no (0), or NI (-1, not implemented).
- MaxHeader uint64 // 5 Protocol specific max header size
- Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
- ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
- Capabilities NetProtocolCapabilities
-}
-
-// NetProtocolCapabilities contains a list of capabilities for each protocol.
-type NetProtocolCapabilities struct {
- Close bool // 8
- Connect bool // 9
- Disconnect bool // 10
- Accept bool // 11
- IoCtl bool // 12
- Init bool // 13
- Destroy bool // 14
- Shutdown bool // 15
- SetSockOpt bool // 16
- GetSockOpt bool // 17
- SendMsg bool // 18
- RecvMsg bool // 19
- SendPage bool // 20
- Bind bool // 21
- BacklogRcv bool // 22
- Hash bool // 23
- UnHash bool // 24
- GetPort bool // 25
- EnterMemoryPressure bool // 26
-}
-
-// NetProtocols reads stats from /proc/net/protocols and returns a map of
-// NetProtocolStatLine entries. As of this writing no official Linux documentation
-// exists, however the source is fairly self-explanatory and the format has been
-// stable since its introduction in 2.6.12-rc2:
-// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
-// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
-func (fs FS) NetProtocols() (NetProtocolStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
- if err != nil {
- return NetProtocolStats{}, err
- }
- return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
-}
-
-func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
- nps := NetProtocolStats{}
-
- // Skip the header line
- s.Scan()
-
- for s.Scan() {
- line, err := nps.parseLine(s.Text())
- if err != nil {
- return NetProtocolStats{}, err
- }
-
- nps[line.Name] = *line
- }
- return nps, nil
-}
-
-func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
- line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
- var err error
- const enabled = "yes"
- const disabled = "no"
-
- fields := strings.Fields(rawLine)
- line.Name = fields[0]
- line.Size, err = strconv.ParseUint(fields[1], 10, 64)
- if err != nil {
- return nil, err
- }
- line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
- if err != nil {
- return nil, err
- }
- line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- if fields[4] == enabled {
- line.Pressure = 1
- } else if fields[4] == disabled {
- line.Pressure = 0
- } else {
- line.Pressure = -1
- }
- line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- if fields[6] == enabled {
- line.Slab = true
- } else if fields[6] == disabled {
- line.Slab = false
- } else {
- return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
- }
- line.ModuleName = fields[7]
-
- err = line.Capabilities.parseCapabilities(fields[8:])
- if err != nil {
- return nil, err
- }
-
- return line, nil
-}
-
-func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
- // The capabilities are all bools so we can loop over to map them
- capabilityFields := [...]*bool{
- &pc.Close,
- &pc.Connect,
- &pc.Disconnect,
- &pc.Accept,
- &pc.IoCtl,
- &pc.Init,
- &pc.Destroy,
- &pc.Shutdown,
- &pc.SetSockOpt,
- &pc.GetSockOpt,
- &pc.SendMsg,
- &pc.RecvMsg,
- &pc.SendPage,
- &pc.Bind,
- &pc.BacklogRcv,
- &pc.Hash,
- &pc.UnHash,
- &pc.GetPort,
- &pc.EnterMemoryPressure,
- }
-
- for i := 0; i < len(capabilities); i++ {
- if capabilities[i] == "y" {
- *capabilityFields[i] = true
- } else if capabilities[i] == "n" {
- *capabilityFields[i] = false
- } else {
- return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
- }
- }
- return nil
-}
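For reference, a usage sketch of the API deleted above, assuming the upstream github.com/prometheus/procfs module and a mounted /proc:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetProtocols()
	if err != nil {
		log.Fatal(err)
	}
	// One NetProtocolStatLine per protocol, keyed by name (TCP, UDP, UNIX, ...).
	for name, line := range stats {
		fmt.Printf("%-10s sockets=%d pages=%d\n", name, line.Sockets, line.Memory)
	}
}
```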
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
deleted file mode 100644
index deb7029fe..000000000
--- a/vendor/github.com/prometheus/procfs/net_route.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-const (
- blackholeRepresentation string = "*"
- blackholeIfaceName string = "blackhole"
- routeLineColumns int = 11
-)
-
-// A NetRouteLine represents one line from net/route.
-type NetRouteLine struct {
- Iface string
- Destination uint32
- Gateway uint32
- Flags uint32
- RefCnt uint32
- Use uint32
- Metric uint32
- Mask uint32
- MTU uint32
- Window uint32
- IRTT uint32
-}
-
-func (fs FS) NetRoute() ([]NetRouteLine, error) {
- return readNetRoute(fs.proc.Path("net", "route"))
-}
-
-func readNetRoute(path string) ([]NetRouteLine, error) {
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, err
- }
-
- routelines, err := parseNetRoute(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
- }
- return routelines, nil
-}
-
-func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
- var routelines []NetRouteLine
-
- scanner := bufio.NewScanner(r)
- scanner.Scan()
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- routeline, err := parseNetRouteLine(fields)
- if err != nil {
- return nil, err
- }
- routelines = append(routelines, *routeline)
- }
- return routelines, nil
-}
-
-func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
- if len(fields) != routeLineColumns {
- return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
- }
- iface := fields[0]
- if iface == blackholeRepresentation {
- iface = blackholeIfaceName
- }
- destination, err := strconv.ParseUint(fields[1], 16, 32)
- if err != nil {
- return nil, err
- }
- gateway, err := strconv.ParseUint(fields[2], 16, 32)
- if err != nil {
- return nil, err
- }
- flags, err := strconv.ParseUint(fields[3], 10, 32)
- if err != nil {
- return nil, err
- }
- refcnt, err := strconv.ParseUint(fields[4], 10, 32)
- if err != nil {
- return nil, err
- }
- use, err := strconv.ParseUint(fields[5], 10, 32)
- if err != nil {
- return nil, err
- }
- metric, err := strconv.ParseUint(fields[6], 10, 32)
- if err != nil {
- return nil, err
- }
- mask, err := strconv.ParseUint(fields[7], 16, 32)
- if err != nil {
- return nil, err
- }
- mtu, err := strconv.ParseUint(fields[8], 10, 32)
- if err != nil {
- return nil, err
- }
- window, err := strconv.ParseUint(fields[9], 10, 32)
- if err != nil {
- return nil, err
- }
- irtt, err := strconv.ParseUint(fields[10], 10, 32)
- if err != nil {
- return nil, err
- }
- routeline := &NetRouteLine{
- Iface: iface,
- Destination: uint32(destination),
- Gateway: uint32(gateway),
- Flags: uint32(flags),
- RefCnt: uint32(refcnt),
- Use: uint32(use),
- Metric: uint32(metric),
- Mask: uint32(mask),
- MTU: uint32(mtu),
- Window: uint32(window),
- IRTT: uint32(irtt),
- }
- return routeline, nil
-}
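Since Destination, Gateway, and Mask are parsed from little-endian hex, turning them back into dotted-quad form takes a little-endian write. A sketch under the same assumptions as the earlier example (toIPv4 is a hypothetical helper):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"log"
	"net"

	"github.com/prometheus/procfs"
)

// toIPv4 converts a route-table value (little-endian, as stored in
// /proc/net/route) into a net.IP.
func toIPv4(v uint32) net.IP {
	b := make([]byte, 4)
	binary.LittleEndian.PutUint32(b, v)
	return net.IP(b)
}

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	routes, err := fs.NetRoute()
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range routes {
		fmt.Printf("%s dst=%s gw=%s\n", r.Iface, toIPv4(r.Destination), toIPv4(r.Gateway))
	}
}
```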
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
deleted file mode 100644
index fae62b13d..000000000
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
-// respectively.
-type NetSockstat struct {
- // Used is non-nil for IPv4 sockstat results, but nil for IPv6.
- Used *int
- Protocols []NetSockstatProtocol
-}
-
-// A NetSockstatProtocol contains statistics about a given socket protocol.
-// Pointer fields indicate that the value may or may not be present on any
-// given protocol.
-type NetSockstatProtocol struct {
- Protocol string
- InUse int
- Orphan *int
- TW *int
- Alloc *int
- Mem *int
- Memory *int
-}
-
-// NetSockstat retrieves IPv4 socket statistics.
-func (fs FS) NetSockstat() (*NetSockstat, error) {
- return readSockstat(fs.proc.Path("net", "sockstat"))
-}
-
-// NetSockstat6 retrieves IPv6 socket statistics.
-//
-// If IPv6 is disabled on this kernel, the returned error can be checked with
-// os.IsNotExist.
-func (fs FS) NetSockstat6() (*NetSockstat, error) {
- return readSockstat(fs.proc.Path("net", "sockstat6"))
-}
-
-// readSockstat opens and parses a NetSockstat from the input file.
-func readSockstat(name string) (*NetSockstat, error) {
- // This file is small and can be read with one syscall.
- b, err := util.ReadFileNoStat(name)
- if err != nil {
- // Do not wrap this error so the caller can detect os.IsNotExist and
- // similar conditions.
- return nil, err
- }
-
- stat, err := parseSockstat(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
- }
-
- return stat, nil
-}
-
-// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
-func parseSockstat(r io.Reader) (*NetSockstat, error) {
- var stat NetSockstat
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Expect a minimum of a protocol and one key/value pair.
- fields := strings.Split(s.Text(), " ")
- if len(fields) < 3 {
- return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
- }
-
- // The remaining fields are key/value pairs.
- kvs, err := parseSockstatKVs(fields[1:])
- if err != nil {
- return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
- }
-
- // The first field is the protocol. We must trim its colon suffix.
- proto := strings.TrimSuffix(fields[0], ":")
- switch proto {
- case "sockets":
- // Special case: IPv4 has a sockets "used" key/value pair that we
- // embed at the top level of the structure.
- used := kvs["used"]
- stat.Used = &used
- default:
- // Parse all other lines as individual protocols.
- nsp := parseSockstatProtocol(kvs)
- nsp.Protocol = proto
- stat.Protocols = append(stat.Protocols, nsp)
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- return &stat, nil
-}
-
-// parseSockstatKVs parses a string slice into a map of key/value pairs.
-func parseSockstatKVs(kvs []string) (map[string]int, error) {
- if len(kvs)%2 != 0 {
- return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
- }
-
- // Iterate two values at a time to gather key/value pairs.
- out := make(map[string]int, len(kvs)/2)
- for i := 0; i < len(kvs); i += 2 {
- vp := util.NewValueParser(kvs[i+1])
- out[kvs[i]] = vp.Int()
-
- if err := vp.Err(); err != nil {
- return nil, err
- }
- }
-
- return out, nil
-}
-
-// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
-func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
- var nsp NetSockstatProtocol
- for k, v := range kvs {
- // Capture the range variable to ensure we get unique pointers for
- // each of the optional fields.
- v := v
- switch k {
- case "inuse":
- nsp.InUse = v
- case "orphan":
- nsp.Orphan = &v
- case "tw":
- nsp.TW = &v
- case "alloc":
- nsp.Alloc = &v
- case "mem":
- nsp.Mem = &v
- case "memory":
- nsp.Memory = &v
- }
- }
-
- return nsp
-}
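A short usage sketch under the same assumptions as above; the pointer fields guard against keys that a given protocol does not report:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil { // only present for IPv4
		fmt.Println("sockets used:", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s inuse=%d\n", p.Protocol, p.InUse)
	}
}
```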
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
deleted file mode 100644
index 71c8059f4..000000000
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// For the proc file format details,
-// See:
-// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
-// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
-// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
-
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
-type SoftnetStat struct {
- // Number of processed packets.
- Processed uint32
- // Number of dropped packets.
- Dropped uint32
- // Number of times processing packets ran out of quota.
- TimeSqueezed uint32
- // Number of collisions encountered while obtaining the device lock during transmission.
- CPUCollision uint32
- // Number of times the CPU was woken up to process packets (received_rps).
- ReceivedRps uint32
- // Number of times the flow limit has been reached.
- FlowLimitCount uint32
- // Softnet backlog status.
- SoftnetBacklogLen uint32
- // CPU id owning this softnet_data.
- Index uint32
- // Width is the number of columns parsed from this softnet_data row.
- Width int
-}
-
-var softNetProcFile = "net/softnet_stat"
-
-// NetSoftnetStat reads data from /proc/net/softnet_stat.
-func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
- if err != nil {
- return nil, err
- }
-
- entries, err := parseSoftnet(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
- }
-
- return entries, nil
-}
-
-func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
- const minColumns = 9
-
- s := bufio.NewScanner(r)
-
- var stats []SoftnetStat
- cpuIndex := 0
- for s.Scan() {
- columns := strings.Fields(s.Text())
- width := len(columns)
- softnetStat := SoftnetStat{}
-
- if width < minColumns {
- return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
- }
-
- // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
- if width >= minColumns {
- us, err := parseHexUint32s(columns[0:9])
- if err != nil {
- return nil, err
- }
-
- softnetStat.Processed = us[0]
- softnetStat.Dropped = us[1]
- softnetStat.TimeSqueezed = us[2]
- softnetStat.CPUCollision = us[8]
- }
-
- // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
- if width >= 10 {
- us, err := parseHexUint32s(columns[9:10])
- if err != nil {
- return nil, err
- }
-
- softnetStat.ReceivedRps = us[0]
- }
-
- // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
- if width >= 11 {
- us, err := parseHexUint32s(columns[10:11])
- if err != nil {
- return nil, err
- }
-
- softnetStat.FlowLimitCount = us[0]
- }
-
- // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
- if width >= 13 {
- us, err := parseHexUint32s(columns[11:13])
- if err != nil {
- return nil, err
- }
-
- softnetStat.SoftnetBacklogLen = us[0]
- softnetStat.Index = us[1]
- } else {
- // For older kernels, create the Index based on the scan line number.
- softnetStat.Index = uint32(cpuIndex)
- }
- softnetStat.Width = width
- stats = append(stats, softnetStat)
- cpuIndex++
- }
-
- return stats, nil
-}
-
-func parseHexUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 16, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
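A usage sketch for the softnet parser above (assuming a mounted /proc); the kernel emits one row per CPU:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	rows, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	// Width records how many columns the running kernel emitted, so callers
	// can tell which optional fields are meaningful.
	for _, r := range rows {
		fmt.Printf("cpu%d processed=%d dropped=%d squeezed=%d (width=%d)\n",
			r.Index, r.Processed, r.Dropped, r.TimeSqueezed, r.Width)
	}
}
```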
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
deleted file mode 100644
index 527762955..000000000
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-type (
- // NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
- NetTCP []*netIPSocketLine
-
- // NetTCPSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetTCP it does not collect
- // the parsed lines into a slice.
- NetTCPSummary NetIPSocketSummary
-)
-
-// NetTCP returns the IPv4 kernel/networking statistics for TCP sockets
-// read from /proc/net/tcp.
-func (fs FS) NetTCP() (NetTCP, error) {
- return newNetTCP(fs.proc.Path("net/tcp"))
-}
-
-// NetTCP6 returns the IPv6 kernel/networking statistics for TCP sockets
-// read from /proc/net/tcp6.
-func (fs FS) NetTCP6() (NetTCP, error) {
- return newNetTCP(fs.proc.Path("net/tcp6"))
-}
-
-// NetTCPSummary returns already computed statistics like the total queue lengths
-// for TCP sockets read from /proc/net/tcp.
-func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
- return newNetTCPSummary(fs.proc.Path("net/tcp"))
-}
-
-// NetTCP6Summary returns already computed statistics like the total queue lengths
-// for TCP sockets read from /proc/net/tcp6.
-func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
- return newNetTCPSummary(fs.proc.Path("net/tcp6"))
-}
-
-// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
-func newNetTCP(file string) (NetTCP, error) {
- n, err := newNetIPSocket(file)
- n1 := NetTCP(n)
- return n1, err
-}
-
-func newNetTCPSummary(file string) (*NetTCPSummary, error) {
- n, err := newNetIPSocketSummary(file)
- if n == nil {
- return nil, err
- }
- n1 := NetTCPSummary(*n)
- return &n1, err
-}
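A sketch of the summary variant, which avoids materializing one netIPSocketLine per socket (same assumptions as the earlier examples):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetTCPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tcp: %d sockets, tx-queued=%d rx-queued=%d\n",
		sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)
}
```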
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
deleted file mode 100644
index 13994c178..000000000
--- a/vendor/github.com/prometheus/procfs/net_tls_stat.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2023 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// TLSStat struct represents data in /proc/net/tls_stat.
-// See https://docs.kernel.org/networking/tls.html#statistics
-type TLSStat struct {
- // number of TX sessions currently installed where host handles cryptography
- TLSCurrTxSw int
- // number of RX sessions currently installed where host handles cryptography
- TLSCurrRxSw int
- // number of TX sessions currently installed where NIC handles cryptography
- TLSCurrTxDevice int
- // number of RX sessions currently installed where NIC handles cryptography
- TLSCurrRxDevice int
- // number of TX sessions opened with host cryptography
- TLSTxSw int
- // number of RX sessions opened with host cryptography
- TLSRxSw int
- // number of TX sessions opened with NIC cryptography
- TLSTxDevice int
- // number of RX sessions opened with NIC cryptography
- TLSRxDevice int
- // record decryption failed (e.g. due to incorrect authentication tag)
- TLSDecryptError int
- // number of RX resyncs sent to NICs handling cryptography
- TLSRxDeviceResync int
- // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
- TLSDecryptRetry int
- // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
- TLSRxNoPadViolation int
-}
-
-// NewTLSStat reads the tls_stat statistics.
-func NewTLSStat() (TLSStat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return TLSStat{}, err
- }
-
- return fs.NewTLSStat()
-}
-
-// NewTLSStat reads the tls_stat statistics.
-func (fs FS) NewTLSStat() (TLSStat, error) {
- file, err := os.Open(fs.proc.Path("net/tls_stat"))
- if err != nil {
- return TLSStat{}, err
- }
- defer file.Close()
-
- var (
- tlsstat = TLSStat{}
- s = bufio.NewScanner(file)
- )
-
- for s.Scan() {
- fields := strings.Fields(s.Text())
-
- if len(fields) != 2 {
- return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
- }
-
- name := fields[0]
- value, err := strconv.Atoi(fields[1])
- if err != nil {
- return TLSStat{}, err
- }
-
- switch name {
- case "TlsCurrTxSw":
- tlsstat.TLSCurrTxSw = value
- case "TlsCurrRxSw":
- tlsstat.TLSCurrRxSw = value
- case "TlsCurrTxDevice":
- tlsstat.TLSCurrTxDevice = value
- case "TlsCurrRxDevice":
- tlsstat.TLSCurrRxDevice = value
- case "TlsTxSw":
- tlsstat.TLSTxSw = value
- case "TlsRxSw":
- tlsstat.TLSRxSw = value
- case "TlsTxDevice":
- tlsstat.TLSTxDevice = value
- case "TlsRxDevice":
- tlsstat.TLSRxDevice = value
- case "TlsDecryptError":
- tlsstat.TLSDecryptError = value
- case "TlsRxDeviceResync":
- tlsstat.TLSRxDeviceResync = value
- case "TlsDecryptRetry":
- tlsstat.TLSDecryptRetry = value
- case "TlsRxNoPadViolation":
- tlsstat.TLSRxNoPadViolation = value
- }
-
- }
-
- return tlsstat, s.Err()
-}
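A usage sketch; NewTLSStat reads from the default /proc mount point and returns an error on kernels where /proc/net/tls_stat does not exist (e.g. the tls module is not loaded):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	stat, err := procfs.NewTLSStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("kTLS tx: sw=%d device=%d, decrypt errors=%d\n",
		stat.TLSCurrTxSw, stat.TLSCurrTxDevice, stat.TLSDecryptError)
}
```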
diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go
deleted file mode 100644
index 9ac3daf2d..000000000
--- a/vendor/github.com/prometheus/procfs/net_udp.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-type (
- // NetUDP represents the contents of /proc/net/udp{,6} file without the header.
- NetUDP []*netIPSocketLine
-
- // NetUDPSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetUDP it does not collect
- // the parsed lines into a slice.
- NetUDPSummary NetIPSocketSummary
-)
-
-// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
-// read from /proc/net/udp.
-func (fs FS) NetUDP() (NetUDP, error) {
- return newNetUDP(fs.proc.Path("net/udp"))
-}
-
-// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
-// read from /proc/net/udp6.
-func (fs FS) NetUDP6() (NetUDP, error) {
- return newNetUDP(fs.proc.Path("net/udp6"))
-}
-
-// NetUDPSummary returns already computed statistics like the total queue lengths
-// for UDP datagrams read from /proc/net/udp.
-func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
- return newNetUDPSummary(fs.proc.Path("net/udp"))
-}
-
-// NetUDP6Summary returns already computed statistics like the total queue lengths
-// for UDP datagrams read from /proc/net/udp6.
-func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
- return newNetUDPSummary(fs.proc.Path("net/udp6"))
-}
-
-// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
-func newNetUDP(file string) (NetUDP, error) {
- n, err := newNetIPSocket(file)
- n1 := NetUDP(n)
- return n1, err
-}
-
-func newNetUDPSummary(file string) (*NetUDPSummary, error) {
- n, err := newNetIPSocketSummary(file)
- if n == nil {
- return nil, err
- }
- n1 := NetUDPSummary(*n)
- return &n1, err
-}
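A usage sketch showing the UDP-only Drops pointer (same assumptions as the earlier examples):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("udp sockets: %d\n", sum.UsedSockets)
	if sum.Drops != nil { // Drops is only populated for the udp{,6} files
		fmt.Printf("udp drops: %d\n", *sum.Drops)
	}
}
```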
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
deleted file mode 100644
index d868cebda..000000000
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// For the proc file format details,
-// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
-// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
-
-// Constants for the various /proc/net/unix enumerations.
-// TODO: match against x/sys/unix or similar?
-const (
- netUnixTypeStream = 1
- netUnixTypeDgram = 2
- netUnixTypeSeqpacket = 5
-
- netUnixFlagDefault = 0
- netUnixFlagListen = 1 << 16
-
- netUnixStateUnconnected = 1
- netUnixStateConnecting = 2
- netUnixStateConnected = 3
- netUnixStateDisconnected = 4
-)
-
-// NetUNIXType is the type of the type field.
-type NetUNIXType uint64
-
-// NetUNIXFlags is the type of the flags field.
-type NetUNIXFlags uint64
-
-// NetUNIXState is the type of the state field.
-type NetUNIXState uint64
-
-// NetUNIXLine represents a line of /proc/net/unix.
-type NetUNIXLine struct {
- KernelPtr string
- RefCount uint64
- Protocol uint64
- Flags NetUNIXFlags
- Type NetUNIXType
- State NetUNIXState
- Inode uint64
- Path string
-}
-
-// NetUNIX holds the data read from /proc/net/unix.
-type NetUNIX struct {
- Rows []*NetUNIXLine
-}
-
-// NetUNIX returns data read from /proc/net/unix.
-func (fs FS) NetUNIX() (*NetUNIX, error) {
- return readNetUNIX(fs.proc.Path("net/unix"))
-}
-
-// readNetUNIX reads data in /proc/net/unix format from the specified file.
-func readNetUNIX(file string) (*NetUNIX, error) {
- // This file could be quite large and a streaming read is desirable versus
- // reading the entire contents at once.
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseNetUNIX(f)
-}
-
-// parseNetUNIX creates a NetUnix structure from the incoming stream.
-func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
- // Begin scanning by checking for the existence of Inode.
- s := bufio.NewScanner(r)
- s.Scan()
-
- // The man page for proc(5) does not mention an Inode field, but it does
- // exist in practice. This code works for both cases.
- hasInode := strings.Contains(s.Text(), "Inode")
-
- // Expect a minimum number of fields, but Inode and Path are optional:
- // Num RefCount Protocol Flags Type St Inode Path
- minFields := 6
- if hasInode {
- minFields++
- }
-
- var nu NetUNIX
- for s.Scan() {
- line := s.Text()
- item, err := nu.parseLine(line, hasInode, minFields)
- if err != nil {
- return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
- }
-
- nu.Rows = append(nu.Rows, item)
- }
-
- if err := s.Err(); err != nil {
- return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
- }
-
- return &nu, nil
-}
-
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
- fields := strings.Fields(line)
-
- l := len(fields)
- if l < min {
- return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
- }
-
- // Field offsets are as follows:
- // Num RefCount Protocol Flags Type St Inode Path
-
- kernelPtr := strings.TrimSuffix(fields[0], ":")
-
- users, err := u.parseUsers(fields[1])
- if err != nil {
- return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
- }
-
- flags, err := u.parseFlags(fields[3])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
- }
-
- typ, err := u.parseType(fields[4])
- if err != nil {
- return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
- }
-
- state, err := u.parseState(fields[5])
- if err != nil {
- return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
- }
-
- var inode uint64
- if hasInode {
- inode, err = u.parseInode(fields[6])
- if err != nil {
- return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
- }
- }
-
- n := &NetUNIXLine{
- KernelPtr: kernelPtr,
- RefCount: users,
- Type: typ,
- Flags: flags,
- State: state,
- Inode: inode,
- }
-
- // Path field is optional.
- if l > min {
- // Path occurs at either index 6 or 7 depending on whether inode is
- // already present.
- pathIdx := 7
- if !hasInode {
- pathIdx--
- }
-
- n.Path = fields[pathIdx]
- }
-
- return n, nil
-}
-
-func (u NetUNIX) parseUsers(s string) (uint64, error) {
- return strconv.ParseUint(s, 16, 32)
-}
-
-func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
- typ, err := strconv.ParseUint(s, 16, 16)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXType(typ), nil
-}
-
-func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
- flags, err := strconv.ParseUint(s, 16, 32)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXFlags(flags), nil
-}
-
-func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
- st, err := strconv.ParseInt(s, 16, 8)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXState(st), nil
-}
-
-func (u NetUNIX) parseInode(s string) (uint64, error) {
- return strconv.ParseUint(s, 10, 64)
-}
-
-func (t NetUNIXType) String() string {
- switch t {
- case netUnixTypeStream:
- return "stream"
- case netUnixTypeDgram:
- return "dgram"
- case netUnixTypeSeqpacket:
- return "seqpacket"
- }
- return "unknown"
-}
-
-func (f NetUNIXFlags) String() string {
- switch f {
- case netUnixFlagListen:
- return "listen"
- default:
- return "default"
- }
-}
-
-func (s NetUNIXState) String() string {
- switch s {
- case netUnixStateUnconnected:
- return "unconnected"
- case netUnixStateConnecting:
- return "connecting"
- case netUnixStateConnected:
- return "connected"
- case netUnixStateDisconnected:
- return "disconnected"
- }
- return "unknown"
-}
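A usage sketch; the String methods defined above map the raw enum values to readable names (same assumptions as earlier):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// Path is empty for unnamed (socketpair-style) sockets.
		fmt.Printf("%s %s %s %s\n", row.KernelPtr, row.Type, row.State, row.Path)
	}
}
```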
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
deleted file mode 100644
index 7c597bc87..000000000
--- a/vendor/github.com/prometheus/procfs/net_wireless.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Wireless models the content of /proc/net/wireless.
-type Wireless struct {
- Name string
-
- // Status is the current 4-digit hex value status of the interface.
- Status uint64
-
- // QualityLink is the link quality.
- QualityLink int
-
- // QualityLevel is the signal gain (dBm).
- QualityLevel int
-
- // QualityNoise is the signal noise baseline (dBm).
- QualityNoise int
-
- // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
- DiscardedNwid int
-
- // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
- DiscardedCrypt int
-
- // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
- DiscardedFrag int
-
- // DiscardedRetry is the number of discarded packets that reached max MAC retries.
- DiscardedRetry int
-
- // DiscardedMisc is the number of discarded packets for other reasons.
- DiscardedMisc int
-
- // MissedBeacon is the number of missed beacons/superframe.
- MissedBeacon int
-}
-
-// Wireless returns kernel wireless statistics.
-func (fs FS) Wireless() ([]*Wireless, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
- if err != nil {
- return nil, err
- }
-
- m, err := parseWireless(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
- }
-
- return m, nil
-}
-
-// parseWireless parses the contents of /proc/net/wireless.
-/*
-Inter-| sta-| Quality | Discarded packets | Missed | WE
-face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
- eth1: 0000 5. -256. -10. 0 1 0 3 0 0
- eth2: 0000 5. -256. -20. 0 2 0 4 0 0
-*/
-func parseWireless(r io.Reader) ([]*Wireless, error) {
- var (
- interfaces []*Wireless
- scanner = bufio.NewScanner(r)
- )
-
- for n := 0; scanner.Scan(); n++ {
- // Skip the 2 header lines.
- if n < 2 {
- continue
- }
-
- line := scanner.Text()
-
- parts := strings.Split(line, ":")
- if len(parts) != 2 {
- return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
- }
-
- name := strings.TrimSpace(parts[0])
- stats := strings.Fields(parts[1])
-
- if len(stats) < 10 {
- return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
- }
-
- status, err := strconv.ParseUint(stats[0], 16, 16)
- if err != nil {
- return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
- }
-
- qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
- if err != nil {
- return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, stats[1], err)
- }
-
- qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
- if err != nil {
- return nil, fmt.Errorf("%w: parse Quality:level as integer %q: %w", ErrFileParse, stats[2], err)
- }
-
- qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
- if err != nil {
- return nil, fmt.Errorf("%w: parse Quality:noise as integer %q: %w", ErrFileParse, stats[3], err)
- }
-
- dnwid, err := strconv.Atoi(stats[4])
- if err != nil {
- return nil, fmt.Errorf("%w: parse Discarded:nwid as integer %q: %w", ErrFileParse, stats[4], err)
- }
-
- dcrypt, err := strconv.Atoi(stats[5])
- if err != nil {
- return nil, fmt.Errorf("%w: parse Discarded:crypt as integer %q: %w", ErrFileParse, stats[5], err)
- }
-
- dfrag, err := strconv.Atoi(stats[6])
- if err != nil {
- return nil, fmt.Errorf("%w: parse Discarded:frag as integer %q: %w", ErrFileParse, stats[6], err)
- }
-
- dretry, err := strconv.Atoi(stats[7])
- if err != nil {
- return nil, fmt.Errorf("%w: parse Discarded:retry as integer %q: %w", ErrFileParse, stats[7], err)
- }
-
- dmisc, err := strconv.Atoi(stats[8])
- if err != nil {
- return nil, fmt.Errorf("%w: parse Discarded:misc as integer %q: %w", ErrFileParse, stats[8], err)
- }
-
- mbeacon, err := strconv.Atoi(stats[9])
- if err != nil {
- return nil, fmt.Errorf("%w: parse Missed:beacon as integer %q: %w", ErrFileParse, stats[9], err)
- }
-
- w := &Wireless{
- Name: name,
- Status: status,
- QualityLink: qlink,
- QualityLevel: qlevel,
- QualityNoise: qnoise,
- DiscardedNwid: dnwid,
- DiscardedCrypt: dcrypt,
- DiscardedFrag: dfrag,
- DiscardedRetry: dretry,
- DiscardedMisc: dmisc,
- MissedBeacon: mbeacon,
- }
-
- interfaces = append(interfaces, w)
- }
-
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
- }
-
- return interfaces, nil
-}
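A usage sketch (assuming a mounted /proc; the call fails on hosts without /proc/net/wireless):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s link=%d level=%ddBm noise=%ddBm\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise)
	}
}
```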
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
deleted file mode 100644
index 932ef2046..000000000
--- a/vendor/github.com/prometheus/procfs/net_xfrm.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2017 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// XfrmStat models the contents of /proc/net/xfrm_stat.
-type XfrmStat struct {
- // All errors which are not matched by other
- XfrmInError int
- // No buffer is left
- XfrmInBufferError int
- // Header Error
- XfrmInHdrError int
- // No state found
- // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
- XfrmInNoStates int
- // Transformation protocol specific error
- // e.g. SA Key is wrong
- XfrmInStateProtoError int
- // Transformation mode specific error
- XfrmInStateModeError int
- // Sequence error
- // e.g. sequence number is out of window
- XfrmInStateSeqError int
- // State is expired
- XfrmInStateExpired int
- // State has mismatch option
- // e.g. UDP encapsulation type is mismatched
- XfrmInStateMismatch int
- // State is invalid
- XfrmInStateInvalid int
- // No matching template for states
- // e.g. Inbound SAs are correct but SP rule is wrong
- XfrmInTmplMismatch int
- // No policy is found for states
- // e.g. Inbound SAs are correct but no SP is found
- XfrmInNoPols int
- // Policy discards
- XfrmInPolBlock int
- // Policy error
- XfrmInPolError int
- // All errors which are not matched by others
- XfrmOutError int
- // Bundle generation error
- XfrmOutBundleGenError int
- // Bundle check error
- XfrmOutBundleCheckError int
- // No state was found
- XfrmOutNoStates int
- // Transformation protocol specific error
- XfrmOutStateProtoError int
- // Transportation mode specific error
- XfrmOutStateModeError int
- // Sequence error
- // i.e. sequence number overflow
- XfrmOutStateSeqError int
- // State is expired
- XfrmOutStateExpired int
- // Policy discards
- XfrmOutPolBlock int
- // Policy is dead
- XfrmOutPolDead int
- // Policy Error
- XfrmOutPolError int
- // Forward routing of a packet is not allowed
- XfrmFwdHdrError int
- // State is invalid, perhaps expired
- XfrmOutStateInvalid int
- // State hasn’t been fully acquired before use
- XfrmAcquireError int
-}
-
-// NewXfrmStat reads the xfrm_stat statistics.
-func NewXfrmStat() (XfrmStat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return XfrmStat{}, err
- }
-
- return fs.NewXfrmStat()
-}
-
-// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
-func (fs FS) NewXfrmStat() (XfrmStat, error) {
- file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
- if err != nil {
- return XfrmStat{}, err
- }
- defer file.Close()
-
- var (
- x = XfrmStat{}
- s = bufio.NewScanner(file)
- )
-
- for s.Scan() {
- fields := strings.Fields(s.Text())
-
- if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
- }
-
- name := fields[0]
- value, err := strconv.Atoi(fields[1])
- if err != nil {
- return XfrmStat{}, err
- }
-
- switch name {
- case "XfrmInError":
- x.XfrmInError = value
- case "XfrmInBufferError":
- x.XfrmInBufferError = value
- case "XfrmInHdrError":
- x.XfrmInHdrError = value
- case "XfrmInNoStates":
- x.XfrmInNoStates = value
- case "XfrmInStateProtoError":
- x.XfrmInStateProtoError = value
- case "XfrmInStateModeError":
- x.XfrmInStateModeError = value
- case "XfrmInStateSeqError":
- x.XfrmInStateSeqError = value
- case "XfrmInStateExpired":
- x.XfrmInStateExpired = value
- case "XfrmInStateInvalid":
- x.XfrmInStateInvalid = value
- case "XfrmInTmplMismatch":
- x.XfrmInTmplMismatch = value
- case "XfrmInNoPols":
- x.XfrmInNoPols = value
- case "XfrmInPolBlock":
- x.XfrmInPolBlock = value
- case "XfrmInPolError":
- x.XfrmInPolError = value
- case "XfrmOutError":
- x.XfrmOutError = value
- case "XfrmInStateMismatch":
- x.XfrmInStateMismatch = value
- case "XfrmOutBundleGenError":
- x.XfrmOutBundleGenError = value
- case "XfrmOutBundleCheckError":
- x.XfrmOutBundleCheckError = value
- case "XfrmOutNoStates":
- x.XfrmOutNoStates = value
- case "XfrmOutStateProtoError":
- x.XfrmOutStateProtoError = value
- case "XfrmOutStateModeError":
- x.XfrmOutStateModeError = value
- case "XfrmOutStateSeqError":
- x.XfrmOutStateSeqError = value
- case "XfrmOutStateExpired":
- x.XfrmOutStateExpired = value
- case "XfrmOutPolBlock":
- x.XfrmOutPolBlock = value
- case "XfrmOutPolDead":
- x.XfrmOutPolDead = value
- case "XfrmOutPolError":
- x.XfrmOutPolError = value
- case "XfrmFwdHdrError":
- x.XfrmFwdHdrError = value
- case "XfrmOutStateInvalid":
- x.XfrmOutStateInvalid = value
- case "XfrmAcquireError":
- x.XfrmAcquireError = value
- }
-
- }
-
- return x, s.Err()
-}
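A usage sketch pulling a few of the counters most useful for spotting IPsec trouble:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewXfrmStat reads from the default /proc mount point.
	stat, err := procfs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("in: no-states=%d state-proto-err=%d  out: no-states=%d\n",
		stat.XfrmInNoStates, stat.XfrmInStateProtoError, stat.XfrmOutNoStates)
}
```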
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
deleted file mode 100644
index 742dff453..000000000
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-)
-
-// NetStat contains statistics for all the counters from one file.
-type NetStat struct {
- Stats map[string][]uint64
- Filename string
-}
-
-// NetStat retrieves stats from `/proc/net/stat/`.
-func (fs FS) NetStat() ([]NetStat, error) {
- statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
- if err != nil {
- return nil, err
- }
-
- var netStatsTotal []NetStat
-
- for _, filePath := range statFiles {
- procNetstat, err := parseNetstat(filePath)
- if err != nil {
- return nil, err
- }
- procNetstat.Filename = filepath.Base(filePath)
-
- netStatsTotal = append(netStatsTotal, procNetstat)
- }
- return netStatsTotal, nil
-}
-
-// parseNetstat parses the metrics from `/proc/net/stat/` file
-// and returns a NetStat structure.
-func parseNetstat(filePath string) (NetStat, error) {
- netStat := NetStat{
- Stats: make(map[string][]uint64),
- }
- file, err := os.Open(filePath)
- if err != nil {
- return netStat, err
- }
- defer file.Close()
-
- scanner := bufio.NewScanner(file)
- scanner.Scan()
-
- // First string is always a header for stats
- var headers []string
- headers = append(headers, strings.Fields(scanner.Text())...)
-
- // Other strings represent per-CPU counters
- for scanner.Scan() {
- for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 64)
- if err != nil {
- return NetStat{}, err
- }
- netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
- }
- }
-
- return netStat, nil
-}
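A usage sketch; each file under /proc/net/stat/ yields one NetStat with a per-CPU slice for every counter named in its header:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		// One slice entry per CPU, in scan order.
		for name, perCPU := range s.Stats {
			fmt.Printf("%s/%s: %v\n", s.Filename, name, perCPU)
		}
	}
}
```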
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
deleted file mode 100644
index 142796368..000000000
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Proc provides information about a running process.
-type Proc struct {
- // The process ID.
- PID int
-
- fs FS
-}
-
-// Procs represents a list of Proc structs.
-type Procs []Proc
-
-var (
- ErrFileParse = errors.New("Error Parsing File")
- ErrFileRead = errors.New("Error Reading File")
- ErrMountPoint = errors.New("Error Accessing Mount point")
-)
-
-func (p Procs) Len() int { return len(p) }
-func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
-
-// Self returns a process for the current process read via /proc/self.
-func Self() (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Proc{}, err
- }
- return fs.Self()
-}
-
-// NewProc returns a process for the given pid under /proc.
-func NewProc(pid int) (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Proc{}, err
- }
- return fs.Proc(pid)
-}
-
-// AllProcs returns a list of all currently available processes under /proc.
-func AllProcs() (Procs, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Procs{}, err
- }
- return fs.AllProcs()
-}
-
-// Self returns a process for the current process.
-func (fs FS) Self() (Proc, error) {
- p, err := os.Readlink(fs.proc.Path("self"))
- if err != nil {
- return Proc{}, err
- }
- pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
- if err != nil {
- return Proc{}, err
- }
- return fs.Proc(pid)
-}
-
-// NewProc returns a process for the given pid.
-//
-// Deprecated: Use fs.Proc() instead.
-func (fs FS) NewProc(pid int) (Proc, error) {
- return fs.Proc(pid)
-}
-
-// Proc returns a process for the given pid.
-func (fs FS) Proc(pid int) (Proc, error) {
- if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
- return Proc{}, err
- }
- return Proc{PID: pid, fs: fs}, nil
-}
-
-// AllProcs returns a list of all currently available processes.
-func (fs FS) AllProcs() (Procs, error) {
- d, err := os.Open(fs.proc.Path())
- if err != nil {
- return Procs{}, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
- }
-
- p := Procs{}
- for _, n := range names {
- pid, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- continue
- }
- p = append(p, Proc{PID: int(pid), fs: fs})
- }
-
- return p, nil
-}
-
-// CmdLine returns the command line of a process.
-func (p Proc) CmdLine() ([]string, error) {
- data, err := util.ReadFileNoStat(p.path("cmdline"))
- if err != nil {
- return nil, err
- }
-
- if len(data) < 1 {
- return []string{}, nil
- }
-
- return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
-}
-
-// Wchan returns the wchan (wait channel) of a process.
-func (p Proc) Wchan() (string, error) {
- f, err := os.Open(p.path("wchan"))
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- data, err := io.ReadAll(f)
- if err != nil {
- return "", err
- }
-
- wchan := string(data)
- if wchan == "" || wchan == "0" {
- return "", nil
- }
-
- return wchan, nil
-}
-
-// Comm returns the command name of a process.
-func (p Proc) Comm() (string, error) {
- data, err := util.ReadFileNoStat(p.path("comm"))
- if err != nil {
- return "", err
- }
-
- return strings.TrimSpace(string(data)), nil
-}
-
-// Executable returns the absolute path of the executable command of a process.
-func (p Proc) Executable() (string, error) {
- exe, err := os.Readlink(p.path("exe"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return exe, err
-}
-
-// Cwd returns the absolute path to the current working directory of the process.
-func (p Proc) Cwd() (string, error) {
- wd, err := os.Readlink(p.path("cwd"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return wd, err
-}
-
-// RootDir returns the absolute path to the process's root directory (as set by chroot).
-func (p Proc) RootDir() (string, error) {
- rdir, err := os.Readlink(p.path("root"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return rdir, err
-}
-
-// FileDescriptors returns the currently open file descriptors of a process.
-func (p Proc) FileDescriptors() ([]uintptr, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- fds := make([]uintptr, len(names))
- for i, n := range names {
- fd, err := strconv.ParseInt(n, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
- }
- fds[i] = uintptr(fd)
- }
-
- return fds, nil
-}
-
-// FileDescriptorTargets returns the targets of all file descriptors of a process.
-// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
-func (p Proc) FileDescriptorTargets() ([]string, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- targets := make([]string, len(names))
-
- for i, name := range names {
- target, err := os.Readlink(p.path("fd", name))
- if err == nil {
- targets[i] = target
- }
- }
-
- return targets, nil
-}
-
-// FileDescriptorsLen returns the number of currently open file descriptors of
-// a process.
-func (p Proc) FileDescriptorsLen() (int, error) {
- // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
- if p.fs.isReal {
- stat, err := os.Stat(p.path("fd"))
- if err != nil {
- return 0, err
- }
-
- size := stat.Size()
- if size > 0 {
- return int(size), nil
- }
- }
-
- fds, err := p.fileDescriptors()
- if err != nil {
- return 0, err
- }
-
- return len(fds), nil
-}
-
-// MountStats retrieves statistics and configuration for mount points in a
-// process's namespace.
-func (p Proc) MountStats() ([]*Mount, error) {
- f, err := os.Open(p.path("mountstats"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseMountStats(f)
-}
-
-// MountInfo retrieves mount information for mount points in a
-// process's namespace.
-// It supplies information missing in `/proc/self/mounts` and
-// fixes various other problems with that file too.
-func (p Proc) MountInfo() ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat(p.path("mountinfo"))
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
-
-func (p Proc) fileDescriptors() ([]string, error) {
- d, err := os.Open(p.path("fd"))
- if err != nil {
- return nil, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
- }
-
- return names, nil
-}
-
-func (p Proc) path(pa ...string) string {
- return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
-}
-
-// FileDescriptorsInfo retrieves information about all file descriptors of
-// the process.
-func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- var fdinfos ProcFDInfos
-
- for _, n := range names {
- fdinfo, err := p.FDInfo(n)
- if err != nil {
- continue
- }
- fdinfos = append(fdinfos, *fdinfo)
- }
-
- return fdinfos, nil
-}
-
-// Schedstat returns task scheduling information for the process.
-func (p Proc) Schedstat() (ProcSchedstat, error) {
- contents, err := os.ReadFile(p.path("schedstat"))
- if err != nil {
- return ProcSchedstat{}, err
- }
- return parseProcSchedstat(string(contents))
-}
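For context, the accessors removed above compose directly into a process lister. A minimal sketch, hedged on the package's `NewFS` constructor, which lives in a part of the vendored tree not shown in this hunk; the later sketches reuse a `procfs.Proc` or `procfs.FS` obtained this way:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS is assumed from the wider procfs package; it anchors the
	// abstraction at an existing proc mount point.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	procs, err := fs.AllProcs()
	if err != nil {
		log.Fatal(err)
	}

	for _, p := range procs {
		comm, err := p.Comm()
		if err != nil {
			continue // the process may have exited between listing and reading
		}
		fmt.Printf("%d\t%s\n", p.PID, comm)
	}
}
```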
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
deleted file mode 100644
index daeed7f57..000000000
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
-// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
-// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
-// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
-// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
-// in this hierarchy.
-//
-// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
-type Cgroup struct {
- // HierarchyID can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
- // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number.
- HierarchyID int
- // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
- // Cgroups V2 this may be empty, as all active controllers use the same hierarchy.
- Controllers []string
- // Path of this control group, relative to the mount point of the cgroupfs representing this specific
- // hierarchy.
- Path string
-}
-
-// parseCgroupString parses each line of the /proc/[pid]/cgroup file.
-// Line format is hierarchyID:[controller1,controller2]:path.
-func parseCgroupString(cgroupStr string) (*Cgroup, error) {
- var err error
-
- fields := strings.SplitN(cgroupStr, ":", 3)
- if len(fields) < 3 {
- return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
- }
-
- cgroup := &Cgroup{
- Path: fields[2],
- Controllers: nil,
- }
- cgroup.HierarchyID, err = strconv.Atoi(fields[0])
- if err != nil {
- return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
- }
- if fields[1] != "" {
- ssNames := strings.Split(fields[1], ",")
- cgroup.Controllers = append(cgroup.Controllers, ssNames...)
- }
- return cgroup, nil
-}
-
-// parseCgroups reads each line of the /proc/[pid]/cgroup file.
-func parseCgroups(data []byte) ([]Cgroup, error) {
- var cgroups []Cgroup
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- mountString := scanner.Text()
- parsedMounts, err := parseCgroupString(mountString)
- if err != nil {
- return nil, err
- }
- cgroups = append(cgroups, *parsedMounts)
- }
-
- err := scanner.Err()
- return cgroups, err
-}
-
-// Cgroups reads from /proc/[pid]/cgroup and returns a []Cgroup slice locating this PID in each process
-// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the length of the returned slice equals the number of active hierarchies on this system.
-func (p Proc) Cgroups() ([]Cgroup, error) {
- data, err := util.ReadFileNoStat(p.path("cgroup"))
- if err != nil {
- return nil, err
- }
- return parseCgroups(data)
-}
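A sketch of driving the `Proc.Cgroups` API deleted above; the `procfsutil` package name is hypothetical, and the `procfs.Proc` comes from the earlier lister sketch:

```go
package procfsutil // hypothetical helper package

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printCgroups locates a process in every cgroup hierarchy it belongs to.
func printCgroups(p procfs.Proc) error {
	cgroups, err := p.Cgroups()
	if err != nil {
		return err
	}
	for _, cg := range cgroups {
		// On a pure v2 system there is a single entry with HierarchyID 0
		// and an empty controller list.
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n",
			cg.HierarchyID, cg.Controllers, cg.Path)
	}
	return nil
}
```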
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
deleted file mode 100644
index 5dd493899..000000000
--- a/vendor/github.com/prometheus/procfs/proc_cgroups.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CgroupSummary models one line from /proc/cgroups.
-// This file contains information about the controllers that are compiled into the kernel.
-//
-// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
-type CgroupSummary struct {
- // The name of the controller, also known as a subsystem.
- SubsysName string
- // The unique ID of the cgroup hierarchy on which this controller is mounted.
- Hierarchy int
- // The number of control groups in this hierarchy using this controller.
- Cgroups int
- // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled.
- Enabled int
-}
-
-// parseCgroupSummaryString parses each line of the /proc/cgroups file.
-// Line format is `subsys_name hierarchy num_cgroups enabled`.
-func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
- var err error
-
- fields := strings.Fields(CgroupSummaryStr)
- // require at least 4 fields
- if len(fields) < 4 {
- return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
- }
-
- CgroupSummary := &CgroupSummary{
- SubsysName: fields[0],
- }
- CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
- }
- CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
- }
- CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
- }
- return CgroupSummary, nil
-}
-
-// parseCgroupSummary reads each line of the /proc/cgroups file.
-func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
- var CgroupSummarys []CgroupSummary
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- CgroupSummaryString := scanner.Text()
- // ignore comment lines
- if strings.HasPrefix(CgroupSummaryString, "#") {
- continue
- }
- CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
- if err != nil {
- return nil, err
- }
- CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
- }
-
- err := scanner.Err()
- return CgroupSummarys, err
-}
-
-// CgroupSummarys returns information about current /proc/cgroups.
-func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
- if err != nil {
- return nil, err
- }
- return parseCgroupSummary(data)
-}
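`FS.CgroupSummarys` lends itself to a quick inventory of the controllers compiled into the running kernel. Again a hypothetical helper, not code from this diff:

```go
package procfsutil // hypothetical helper package

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// enabledControllers lists the currently enabled cgroup controllers.
func enabledControllers(fs procfs.FS) ([]string, error) {
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		return nil, err
	}
	var names []string
	for _, s := range summaries {
		if s.Enabled == 1 {
			names = append(names, fmt.Sprintf("%s (hierarchy %d, %d cgroups)",
				s.SubsysName, s.Hierarchy, s.Cgroups))
		}
	}
	return names, nil
}
```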
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
deleted file mode 100644
index 57a89895d..000000000
--- a/vendor/github.com/prometheus/procfs/proc_environ.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Environ reads process environments from `/proc/[pid]/environ`.
-func (p Proc) Environ() ([]string, error) {
- environments := make([]string, 0)
-
- data, err := util.ReadFileNoStat(p.path("environ"))
- if err != nil {
- return environments, err
- }
-
- environments = strings.Split(string(data), "\000")
- if len(environments) > 0 {
- environments = environments[:len(environments)-1]
- }
-
- return environments, nil
-}
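Because `Proc.Environ` returns raw `KEY=VALUE` strings, callers typically split each entry exactly once. A sketch, in the same hypothetical helper package:

```go
package procfsutil // hypothetical helper package

import (
	"strings"

	"github.com/prometheus/procfs"
)

// environMap converts the NUL-delimited KEY=VALUE entries into a map.
func environMap(p procfs.Proc) (map[string]string, error) {
	env, err := p.Environ()
	if err != nil {
		return nil, err
	}
	m := make(map[string]string, len(env))
	for _, kv := range env {
		// Values may themselves contain '=', so split only once.
		if parts := strings.SplitN(kv, "=", 2); len(parts) == 2 {
			m[parts[0]] = parts[1]
		}
	}
	return m, nil
}
```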
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
deleted file mode 100644
index fa761b352..000000000
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "regexp"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
- rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
- rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
- rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
- rInotify = regexp.MustCompile(`^inotify`)
- rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
-)
-
-// ProcFDInfo represents file descriptor information.
-type ProcFDInfo struct {
- // File descriptor
- FD string
- // File offset
- Pos string
- // File access mode and status flags
- Flags string
- // Mount point ID
- MntID string
- // Inode number
- Ino string
- // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
- InotifyInfos []InotifyInfo
-}
-
-// FDInfo returns information about the file descriptor fd of the process. On kernels older than 3.8, InotifyInfos will always be empty.
-func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
- data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
- if err != nil {
- return nil, err
- }
-
- var text, pos, flags, mntid, ino string
- var inotify []InotifyInfo
-
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- text = scanner.Text()
- if rPos.MatchString(text) {
- pos = rPos.FindStringSubmatch(text)[1]
- } else if rFlags.MatchString(text) {
- flags = rFlags.FindStringSubmatch(text)[1]
- } else if rMntID.MatchString(text) {
- mntid = rMntID.FindStringSubmatch(text)[1]
- } else if rIno.MatchString(text) {
- ino = rIno.FindStringSubmatch(text)[1]
- } else if rInotify.MatchString(text) {
- newInotify, err := parseInotifyInfo(text)
- if err != nil {
- return nil, err
- }
- inotify = append(inotify, *newInotify)
- }
- }
-
- i := &ProcFDInfo{
- FD: fd,
- Pos: pos,
- Flags: flags,
- MntID: mntid,
- Ino: ino,
- InotifyInfos: inotify,
- }
-
- return i, nil
-}
-
-// InotifyInfo represents a single inotify line in the fdinfo file.
-type InotifyInfo struct {
- // Watch descriptor number
- WD string
- // Inode number
- Ino string
- // Device ID
- Sdev string
- // Mask of events being monitored
- Mask string
-}
-
-// parseInotifyInfo parses a single inotify line of the fdinfo file. Only available on kernel 3.8+.
-func parseInotifyInfo(line string) (*InotifyInfo, error) {
- m := rInotifyParts.FindStringSubmatch(line)
- if len(m) >= 4 {
- var mask string
- if len(m) == 5 {
- mask = m[4]
- }
- i := &InotifyInfo{
- WD: m[1],
- Ino: m[2],
- Sdev: m[3],
- Mask: mask,
- }
- return i, nil
- }
- return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
-}
-
-// ProcFDInfos represents a list of ProcFDInfo structs.
-type ProcFDInfos []ProcFDInfo
-
-func (p ProcFDInfos) Len() int { return len(p) }
-func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
-
-// InotifyWatchLen returns the total number of inotify watches.
-func (p ProcFDInfos) InotifyWatchLen() (int, error) {
- length := 0
- for _, f := range p {
- length += len(f.InotifyInfos)
- }
-
- return length, nil
-}
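The `InotifyWatchLen` accessor removed above combines naturally with `Proc.FileDescriptorsInfo`; a minimal sketch (hypothetical helper package):

```go
package procfsutil // hypothetical helper package

import "github.com/prometheus/procfs"

// inotifyWatches reports how many inotify watches a process holds,
// summed across all of its file descriptors.
func inotifyWatches(p procfs.Proc) (int, error) {
	infos, err := p.FileDescriptorsInfo()
	if err != nil {
		return 0, err
	}
	return infos.InotifyWatchLen()
}
```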
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
deleted file mode 100644
index 86b4b4524..000000000
--- a/vendor/github.com/prometheus/procfs/proc_interrupts.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Interrupt represents a single interrupt line.
-type Interrupt struct {
- // Info is the type of interrupt.
- Info string
- // Devices is the name of the device(s) located at that IRQ.
- Devices string
- // Values is the number of interrupts per CPU.
- Values []string
-}
-
-// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
-// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
-// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
-type Interrupts map[string]Interrupt
-
-// Interrupts creates a new instance from a given Proc instance.
-func (p Proc) Interrupts() (Interrupts, error) {
- data, err := util.ReadFileNoStat(p.path("interrupts"))
- if err != nil {
- return nil, err
- }
- return parseInterrupts(bytes.NewReader(data))
-}
-
-func parseInterrupts(r io.Reader) (Interrupts, error) {
- var (
- interrupts = Interrupts{}
- scanner = bufio.NewScanner(r)
- )
-
- if !scanner.Scan() {
- return nil, errors.New("interrupts empty")
- }
- cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
-
- for scanner.Scan() {
- parts := strings.Fields(scanner.Text())
- if len(parts) == 0 { // skip empty lines
- continue
- }
- if len(parts) < 2 {
- return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
- }
- intName := parts[0][:len(parts[0])-1] // remove trailing :
-
- if len(parts) == 2 {
- interrupts[intName] = Interrupt{
- Info: "",
- Devices: "",
- Values: []string{
- parts[1],
- },
- }
- continue
- }
-
- intr := Interrupt{
- Values: parts[1 : cpuNum+1],
- }
-
- if _, err := strconv.Atoi(intName); err == nil { // numeric interrupt
- intr.Info = parts[cpuNum+1]
- intr.Devices = strings.Join(parts[cpuNum+2:], " ")
- } else {
- intr.Info = strings.Join(parts[cpuNum+1:], " ")
- }
- interrupts[intName] = intr
- }
-
- return interrupts, scanner.Err()
-}
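`Proc.Interrupts` returns the per-CPU counters as raw strings, so consumers usually parse and sum them. A hedged sketch (hypothetical helper):

```go
package procfsutil // hypothetical helper package

import (
	"strconv"

	"github.com/prometheus/procfs"
)

// totalInterrupts sums the per-CPU counters for every IRQ line.
func totalInterrupts(p procfs.Proc) (map[string]uint64, error) {
	irqs, err := p.Interrupts()
	if err != nil {
		return nil, err
	}
	totals := make(map[string]uint64, len(irqs))
	for name, irq := range irqs {
		var sum uint64
		for _, v := range irq.Values {
			n, err := strconv.ParseUint(v, 10, 64)
			if err != nil {
				continue // skip anything that is not a plain counter
			}
			sum += n
		}
		totals[name] = sum
	}
	return totals, nil
}
```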
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
deleted file mode 100644
index 776f34971..000000000
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcIO models the content of /proc/[pid]/io.
-type ProcIO struct {
- // Chars read.
- RChar uint64
- // Chars written.
- WChar uint64
- // Read syscalls.
- SyscR uint64
- // Write syscalls.
- SyscW uint64
- // Bytes read.
- ReadBytes uint64
- // Bytes written.
- WriteBytes uint64
- // Bytes written, but taking into account truncation. See
- // Documentation/filesystems/proc.txt in the kernel sources for
- // detailed explanation.
- CancelledWriteBytes int64
-}
-
-// IO creates a new ProcIO instance from a given Proc instance.
-func (p Proc) IO() (ProcIO, error) {
- pio := ProcIO{}
-
- data, err := util.ReadFileNoStat(p.path("io"))
- if err != nil {
- return pio, err
- }
-
- ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
- "read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
-
- _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
- &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
-
- return pio, err
-}
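A sketch of reading the I/O accounting counters parsed above; the helper package is hypothetical:

```go
package procfsutil // hypothetical helper package

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printIO dumps the I/O accounting counters of a process.
func printIO(p procfs.Proc) error {
	io, err := p.IO()
	if err != nil {
		return err
	}
	fmt.Printf("read: %d bytes (%d syscalls), written: %d bytes (%d syscalls)\n",
		io.ReadBytes, io.SyscR, io.WriteBytes, io.SyscW)
	return nil
}
```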
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
deleted file mode 100644
index 9530b14bc..000000000
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "regexp"
- "strconv"
-)
-
-// ProcLimits represents the soft limits for each of the process's resource
-// limits. For more information see getrlimit(2):
-// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
-type ProcLimits struct {
- // CPU time limit in seconds.
- CPUTime uint64
- // Maximum size of files that the process may create.
- FileSize uint64
- // Maximum size of the process's data segment (initialized data,
- // uninitialized data, and heap).
- DataSize uint64
- // Maximum size of the process stack in bytes.
- StackSize uint64
- // Maximum size of a core file.
- CoreFileSize uint64
- // Limit of the process's resident set in pages.
- ResidentSet uint64
- // Maximum number of processes that can be created for the real user ID of
- // the calling process.
- Processes uint64
- // Value one greater than the maximum file descriptor number that can be
- // opened by this process.
- OpenFiles uint64
- // Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory uint64
- // Maximum size of the process's virtual memory address space in bytes.
- AddressSpace uint64
- // Limit on the combined number of flock(2) locks and fcntl(2) leases that
- // this process may establish.
- FileLocks uint64
- // Limit of signals that may be queued for the real user ID of the calling
- // process.
- PendingSignals uint64
- // Limit on the number of bytes that can be allocated for POSIX message
- // queues for the real user ID of the calling process.
- MsqqueueSize uint64
- // Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority uint64
- // Limit of the real-time priority set using sched_setscheduler(2) or
- // sched_setparam(2).
- RealtimePriority uint64
- // Limit (in microseconds) on the amount of CPU time that a process
- // scheduled under a real-time scheduling policy may consume without making
- // a blocking system call.
- RealtimeTimeout uint64
-}
-
-const (
- limitsFields = 4
- limitsUnlimited = "unlimited"
-)
-
-var (
- limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
-)
-
-// NewLimits returns the current soft limits of the process.
-//
-// Deprecated: Use p.Limits() instead.
-func (p Proc) NewLimits() (ProcLimits, error) {
- return p.Limits()
-}
-
-// Limits returns the current soft limits of the process.
-func (p Proc) Limits() (ProcLimits, error) {
- f, err := os.Open(p.path("limits"))
- if err != nil {
- return ProcLimits{}, err
- }
- defer f.Close()
-
- var (
- l = ProcLimits{}
- s = bufio.NewScanner(f)
- )
-
- s.Scan() // Skip limits header
-
- for s.Scan() {
- fields := limitsMatch.FindStringSubmatch(s.Text())
- if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
- }
-
- switch fields[1] {
- case "Max cpu time":
- l.CPUTime, err = parseUint(fields[2])
- case "Max file size":
- l.FileSize, err = parseUint(fields[2])
- case "Max data size":
- l.DataSize, err = parseUint(fields[2])
- case "Max stack size":
- l.StackSize, err = parseUint(fields[2])
- case "Max core file size":
- l.CoreFileSize, err = parseUint(fields[2])
- case "Max resident set":
- l.ResidentSet, err = parseUint(fields[2])
- case "Max processes":
- l.Processes, err = parseUint(fields[2])
- case "Max open files":
- l.OpenFiles, err = parseUint(fields[2])
- case "Max locked memory":
- l.LockedMemory, err = parseUint(fields[2])
- case "Max address space":
- l.AddressSpace, err = parseUint(fields[2])
- case "Max file locks":
- l.FileLocks, err = parseUint(fields[2])
- case "Max pending signals":
- l.PendingSignals, err = parseUint(fields[2])
- case "Max msgqueue size":
- l.MsqqueueSize, err = parseUint(fields[2])
- case "Max nice priority":
- l.NicePriority, err = parseUint(fields[2])
- case "Max realtime priority":
- l.RealtimePriority, err = parseUint(fields[2])
- case "Max realtime timeout":
- l.RealtimeTimeout, err = parseUint(fields[2])
- }
- if err != nil {
- return ProcLimits{}, err
- }
- }
-
- return l, s.Err()
-}
-
-func parseUint(s string) (uint64, error) {
- if s == limitsUnlimited {
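- // "unlimited" is represented by the maximum uint64 value (math.MaxUint64).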
- return 18446744073709551615, nil
- }
- i, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
- }
- return i, nil
-}
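Combining `Proc.Limits` with `Proc.FileDescriptorsLen` from earlier in this diff gives a cheap file-descriptor headroom check. Note that `parseUint` maps "unlimited" to `math.MaxUint64`, which a caller has to treat specially; a sketch (hypothetical helper):

```go
package procfsutil // hypothetical helper package

import (
	"fmt"
	"math"

	"github.com/prometheus/procfs"
)

// checkFDHeadroom warns when a process is close to its open-files limit.
func checkFDHeadroom(p procfs.Proc) error {
	limits, err := p.Limits()
	if err != nil {
		return err
	}
	if limits.OpenFiles == math.MaxUint64 {
		return nil // "unlimited": nothing to check
	}
	open, err := p.FileDescriptorsLen()
	if err != nil {
		return err
	}
	if uint64(open) >= limits.OpenFiles/10*9 { // above ~90% of the soft limit
		fmt.Printf("pid %d: %d of %d file descriptors in use\n", p.PID, open, limits.OpenFiles)
	}
	return nil
}
```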
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
deleted file mode 100644
index 7e75c286b..000000000
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
-// +build !js
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-
- "golang.org/x/sys/unix"
-)
-
-// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
-type ProcMapPermissions struct {
- // mapping has the [R]ead flag set
- Read bool
- // mapping has the [W]rite flag set
- Write bool
- // mapping has the [X]ecutable flag set
- Execute bool
- // mapping has the [S]hared flag set
- Shared bool
- // mapping is marked as [P]rivate (copy on write)
- Private bool
-}
-
-// ProcMap contains a single memory mapping of the process,
-// read from `/proc/[pid]/maps`.
-type ProcMap struct {
- // The start address of current mapping.
- StartAddr uintptr
- // The end address of the current mapping
- EndAddr uintptr
- // The permissions for this mapping
- Perms *ProcMapPermissions
- // The current offset into the file/fd (e.g., shared libs)
- Offset int64
- // Device owner of this mapping (major:minor) in Mkdev format.
- Dev uint64
- // The inode of the device above
- Inode uint64
- // The file or pseudofile backing the mapping (empty == anonymous)
- Pathname string
-}
-
-// parseDevice parses the device token of a line and converts it to a dev_t
-// (mkdev) like structure.
-func parseDevice(s string) (uint64, error) {
- i := strings.Index(s, ":")
- if i == -1 {
- return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
- }
-
- major, err := strconv.ParseUint(s[0:i], 16, 0)
- if err != nil {
- return 0, err
- }
-
- minor, err := strconv.ParseUint(s[i+1:], 16, 0)
- if err != nil {
- return 0, err
- }
-
- return unix.Mkdev(uint32(major), uint32(minor)), nil
-}
-
-// parseAddress converts a hex-string to a uintptr.
-func parseAddress(s string) (uintptr, error) {
- a, err := strconv.ParseUint(s, 16, 0)
- if err != nil {
- return 0, err
- }
-
- return uintptr(a), nil
-}
-
-// parseAddresses parses the start-end address.
-func parseAddresses(s string) (uintptr, uintptr, error) {
- idx := strings.Index(s, "-")
- if idx == -1 {
- return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
- }
-
- saddr, err := parseAddress(s[0:idx])
- if err != nil {
- return 0, 0, err
- }
-
- eaddr, err := parseAddress(s[idx+1:])
- if err != nil {
- return 0, 0, err
- }
-
- return saddr, eaddr, nil
-}
-
-// parsePermissions parses a token and returns any that are set.
-func parsePermissions(s string) (*ProcMapPermissions, error) {
- if len(s) < 4 {
- return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
- }
-
- perms := ProcMapPermissions{}
- for _, ch := range s {
- switch ch {
- case 'r':
- perms.Read = true
- case 'w':
- perms.Write = true
- case 'x':
- perms.Execute = true
- case 'p':
- perms.Private = true
- case 's':
- perms.Shared = true
- }
- }
-
- return &perms, nil
-}
-
-// parseProcMap attempts to parse a single line within a /proc/[pid]/maps
-// buffer.
-func parseProcMap(text string) (*ProcMap, error) {
- fields := strings.Fields(text)
- if len(fields) < 5 {
- return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
- }
-
- saddr, eaddr, err := parseAddresses(fields[0])
- if err != nil {
- return nil, err
- }
-
- perms, err := parsePermissions(fields[1])
- if err != nil {
- return nil, err
- }
-
- offset, err := strconv.ParseInt(fields[2], 16, 0)
- if err != nil {
- return nil, err
- }
-
- device, err := parseDevice(fields[3])
- if err != nil {
- return nil, err
- }
-
- inode, err := strconv.ParseUint(fields[4], 10, 0)
- if err != nil {
- return nil, err
- }
-
- pathname := ""
-
- if len(fields) >= 5 {
- pathname = strings.Join(fields[5:], " ")
- }
-
- return &ProcMap{
- StartAddr: saddr,
- EndAddr: eaddr,
- Perms: perms,
- Offset: offset,
- Dev: device,
- Inode: inode,
- Pathname: pathname,
- }, nil
-}
-
-// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
-// process.
-func (p Proc) ProcMaps() ([]*ProcMap, error) {
- file, err := os.Open(p.path("maps"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- maps := []*ProcMap{}
- scan := bufio.NewScanner(file)
-
- for scan.Scan() {
- m, err := parseProcMap(scan.Text())
- if err != nil {
- return nil, err
- }
-
- maps = append(maps, m)
- }
-
- return maps, nil
-}
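A sketch that filters the parsed mappings down to executable regions, using the permission flags decoded above (hypothetical helper):

```go
package procfsutil // hypothetical helper package

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// executableMappings prints every executable region in the address space.
func executableMappings(p procfs.Proc) error {
	maps, err := p.ProcMaps()
	if err != nil {
		return err
	}
	for _, m := range maps {
		if m.Perms != nil && m.Perms.Execute {
			fmt.Printf("%#x-%#x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
		}
	}
	return nil
}
```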
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
deleted file mode 100644
index 8e3ff4d79..000000000
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcNetstat models the content of /proc/[pid]/net/netstat.
-type ProcNetstat struct {
- // The process ID.
- PID int
- TcpExt
- IpExt
-}
-
-type TcpExt struct { // nolint:revive
- SyncookiesSent *float64
- SyncookiesRecv *float64
- SyncookiesFailed *float64
- EmbryonicRsts *float64
- PruneCalled *float64
- RcvPruned *float64
- OfoPruned *float64
- OutOfWindowIcmps *float64
- LockDroppedIcmps *float64
- ArpFilter *float64
- TW *float64
- TWRecycled *float64
- TWKilled *float64
- PAWSActive *float64
- PAWSEstab *float64
- DelayedACKs *float64
- DelayedACKLocked *float64
- DelayedACKLost *float64
- ListenOverflows *float64
- ListenDrops *float64
- TCPHPHits *float64
- TCPPureAcks *float64
- TCPHPAcks *float64
- TCPRenoRecovery *float64
- TCPSackRecovery *float64
- TCPSACKReneging *float64
- TCPSACKReorder *float64
- TCPRenoReorder *float64
- TCPTSReorder *float64
- TCPFullUndo *float64
- TCPPartialUndo *float64
- TCPDSACKUndo *float64
- TCPLossUndo *float64
- TCPLostRetransmit *float64
- TCPRenoFailures *float64
- TCPSackFailures *float64
- TCPLossFailures *float64
- TCPFastRetrans *float64
- TCPSlowStartRetrans *float64
- TCPTimeouts *float64
- TCPLossProbes *float64
- TCPLossProbeRecovery *float64
- TCPRenoRecoveryFail *float64
- TCPSackRecoveryFail *float64
- TCPRcvCollapsed *float64
- TCPDSACKOldSent *float64
- TCPDSACKOfoSent *float64
- TCPDSACKRecv *float64
- TCPDSACKOfoRecv *float64
- TCPAbortOnData *float64
- TCPAbortOnClose *float64
- TCPAbortOnMemory *float64
- TCPAbortOnTimeout *float64
- TCPAbortOnLinger *float64
- TCPAbortFailed *float64
- TCPMemoryPressures *float64
- TCPMemoryPressuresChrono *float64
- TCPSACKDiscard *float64
- TCPDSACKIgnoredOld *float64
- TCPDSACKIgnoredNoUndo *float64
- TCPSpuriousRTOs *float64
- TCPMD5NotFound *float64
- TCPMD5Unexpected *float64
- TCPMD5Failure *float64
- TCPSackShifted *float64
- TCPSackMerged *float64
- TCPSackShiftFallback *float64
- TCPBacklogDrop *float64
- PFMemallocDrop *float64
- TCPMinTTLDrop *float64
- TCPDeferAcceptDrop *float64
- IPReversePathFilter *float64
- TCPTimeWaitOverflow *float64
- TCPReqQFullDoCookies *float64
- TCPReqQFullDrop *float64
- TCPRetransFail *float64
- TCPRcvCoalesce *float64
- TCPRcvQDrop *float64
- TCPOFOQueue *float64
- TCPOFODrop *float64
- TCPOFOMerge *float64
- TCPChallengeACK *float64
- TCPSYNChallenge *float64
- TCPFastOpenActive *float64
- TCPFastOpenActiveFail *float64
- TCPFastOpenPassive *float64
- TCPFastOpenPassiveFail *float64
- TCPFastOpenListenOverflow *float64
- TCPFastOpenCookieReqd *float64
- TCPFastOpenBlackhole *float64
- TCPSpuriousRtxHostQueues *float64
- BusyPollRxPackets *float64
- TCPAutoCorking *float64
- TCPFromZeroWindowAdv *float64
- TCPToZeroWindowAdv *float64
- TCPWantZeroWindowAdv *float64
- TCPSynRetrans *float64
- TCPOrigDataSent *float64
- TCPHystartTrainDetect *float64
- TCPHystartTrainCwnd *float64
- TCPHystartDelayDetect *float64
- TCPHystartDelayCwnd *float64
- TCPACKSkippedSynRecv *float64
- TCPACKSkippedPAWS *float64
- TCPACKSkippedSeq *float64
- TCPACKSkippedFinWait2 *float64
- TCPACKSkippedTimeWait *float64
- TCPACKSkippedChallenge *float64
- TCPWinProbe *float64
- TCPKeepAlive *float64
- TCPMTUPFail *float64
- TCPMTUPSuccess *float64
- TCPWqueueTooBig *float64
-}
-
-type IpExt struct { // nolint:revive
- InNoRoutes *float64
- InTruncatedPkts *float64
- InMcastPkts *float64
- OutMcastPkts *float64
- InBcastPkts *float64
- OutBcastPkts *float64
- InOctets *float64
- OutOctets *float64
- InMcastOctets *float64
- OutMcastOctets *float64
- InBcastOctets *float64
- OutBcastOctets *float64
- InCsumErrors *float64
- InNoECTPkts *float64
- InECT1Pkts *float64
- InECT0Pkts *float64
- InCEPkts *float64
- ReasmOverlaps *float64
-}
-
-func (p Proc) Netstat() (ProcNetstat, error) {
- filename := p.path("net/netstat")
- data, err := util.ReadFileNoStat(filename)
- if err != nil {
- return ProcNetstat{PID: p.PID}, err
- }
- procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
- procNetstat.PID = p.PID
- return procNetstat, err
-}
-
-// parseProcNetstat parses the metrics from the /proc/[pid]/net/netstat file
-// and returns a ProcNetstat structure.
-func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
- var (
- scanner = bufio.NewScanner(r)
- procNetstat = ProcNetstat{}
- )
-
- for scanner.Scan() {
- nameParts := strings.Split(scanner.Text(), " ")
- scanner.Scan()
- valueParts := strings.Split(scanner.Text(), " ")
- // Remove trailing :.
- protocol := strings.TrimSuffix(nameParts[0], ":")
- if len(nameParts) != len(valueParts) {
- return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
- ErrFileParse, fileName, protocol)
- }
- for i := 1; i < len(nameParts); i++ {
- value, err := strconv.ParseFloat(valueParts[i], 64)
- if err != nil {
- return procNetstat, err
- }
- key := nameParts[i]
-
- switch protocol {
- case "TcpExt":
- switch key {
- case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = &value
- case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = &value
- case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = &value
- case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = &value
- case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = &value
- case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = &value
- case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = &value
- case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = &value
- case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = &value
- case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = &value
- case "TW":
- procNetstat.TcpExt.TW = &value
- case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = &value
- case "TWKilled":
- procNetstat.TcpExt.TWKilled = &value
- case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = &value
- case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = &value
- case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = &value
- case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = &value
- case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = &value
- case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = &value
- case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = &value
- case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = &value
- case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = &value
- case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = &value
- case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = &value
- case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = &value
- case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = &value
- case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = &value
- case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = &value
- case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = &value
- case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = &value
- case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = &value
- case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = &value
- case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = &value
- case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = &value
- case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = &value
- case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = &value
- case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = &value
- case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = &value
- case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = &value
- case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = &value
- case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = &value
- case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = &value
- case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = &value
- case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = &value
- case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = &value
- case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = &value
- case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = &value
- case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = &value
- case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = &value
- case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = &value
- case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = &value
- case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = &value
- case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = &value
- case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = &value
- case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = &value
- case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = &value
- case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = &value
- case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = &value
- case "TCPRcvQDrop":
- procNetstat.TcpExt.TCPRcvQDrop = &value
- case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = &value
- case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = &value
- case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = &value
- case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = &value
- case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = &value
- case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = &value
- case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = &value
- case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = &value
- case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
- case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
- case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
- case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = &value
- case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
- case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = &value
- case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = &value
- case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
- case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = &value
- case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
- case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = &value
- case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = &value
- case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = &value
- case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = &value
- case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = &value
- case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = &value
- case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
- case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = &value
- case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = &value
- case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
- case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
- case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = &value
- case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = &value
- case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = &value
- case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = &value
- case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = &value
- case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = &value
- }
- case "IpExt":
- switch key {
- case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = &value
- case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = &value
- case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = &value
- case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = &value
- case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = &value
- case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = &value
- case "InOctets":
- procNetstat.IpExt.InOctets = &value
- case "OutOctets":
- procNetstat.IpExt.OutOctets = &value
- case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = &value
- case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = &value
- case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = &value
- case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = &value
- case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = &value
- case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = &value
- case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = &value
- case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = &value
- case "InCEPkts":
- procNetstat.IpExt.InCEPkts = &value
- case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = &value
- }
- }
- }
- }
- return procNetstat, scanner.Err()
-}
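Since every TcpExt/IpExt field is a `*float64` that stays nil when the kernel does not report the counter, callers must nil-check before dereferencing. A hedged sketch (hypothetical helper):

```go
package procfsutil // hypothetical helper package

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// synRetransRate reports SYN retransmissions relative to originated data,
// guarding against nil pointers since absent counters stay nil.
func synRetransRate(p procfs.Proc) (float64, error) {
	ns, err := p.Netstat()
	if err != nil {
		return 0, err
	}
	if ns.TCPSynRetrans == nil || ns.TCPOrigDataSent == nil || *ns.TCPOrigDataSent == 0 {
		return 0, fmt.Errorf("counters unavailable for pid %d", p.PID)
	}
	return *ns.TCPSynRetrans / *ns.TCPOrigDataSent, nil
}
```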
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
deleted file mode 100644
index 0f8f847f9..000000000
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// Namespace represents a single namespace of a process.
-type Namespace struct {
- Type string // Namespace type.
- Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
-}
-
-// Namespaces contains all of the namespaces that the process is contained in.
-type Namespaces map[string]Namespace
-
-// Namespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
-// process is a member.
-func (p Proc) Namespaces() (Namespaces, error) {
- d, err := os.Open(p.path("ns"))
- if err != nil {
- return nil, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
- }
-
- ns := make(Namespaces, len(names))
- for _, name := range names {
- target, err := os.Readlink(p.path("ns", name))
- if err != nil {
- return nil, err
- }
-
- fields := strings.SplitN(target, ":", 2)
- if len(fields) != 2 {
- return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
- }
-
- typ := fields[0]
- inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
- if err != nil {
- return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
- }
-
- ns[name] = Namespace{typ, uint32(inode)}
- }
-
- return ns, nil
-}
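The inode-equality property documented on `Namespace` makes namespace comparison a one-liner; a sketch (hypothetical helper):

```go
package procfsutil // hypothetical helper package

import "github.com/prometheus/procfs"

// sharesNamespace reports whether two processes are in the same namespace
// of the given type (e.g. "net", "pid"): identical inodes mean shared.
func sharesNamespace(a, b procfs.Proc, typ string) (bool, error) {
	nsA, err := a.Namespaces()
	if err != nil {
		return false, err
	}
	nsB, err := b.Namespaces()
	if err != nil {
		return false, err
	}
	// The zero inode means the namespace type was absent for a process.
	return nsA[typ].Inode == nsB[typ].Inode && nsA[typ].Inode != 0, nil
}
```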
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
deleted file mode 100644
index ccd35f153..000000000
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-// The PSI / pressure interface is described at
-// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
-// Each resource (cpu, io, memory, ...) is exposed as a single file.
-// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
-// Each line contains several averages (over n seconds) and a total in µs.
-//
-// Example io pressure file:
-// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
-// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
-
-// PSILine is a single line of values as returned by `/proc/pressure/*`.
-//
-// The Avg entries are averages over n seconds, as a percentage.
-// The Total entry is in microseconds.
-type PSILine struct {
- Avg10 float64
- Avg60 float64
- Avg300 float64
- Total uint64
-}
-
-// PSIStats represent pressure stall information from /proc/pressure/*
-//
-// "Some" indicates the share of time in which at least some tasks are stalled.
-// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
-type PSIStats struct {
- Some *PSILine
- Full *PSILine
-}
-
-// PSIStatsForResource reads pressure stall information for the specified
-// resource from /proc/pressure/. At time of writing this can be
-// either "cpu", "memory" or "io".
-func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
- if err != nil {
- return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
- }
-
- return parsePSIStats(bytes.NewReader(data))
-}
-
-// parsePSIStats parses the specified file for pressure stall information.
-func parsePSIStats(r io.Reader) (PSIStats, error) {
- psiStats := PSIStats{}
-
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- l := scanner.Text()
- prefix := strings.Split(l, " ")[0]
- switch prefix {
- case "some":
- psi := PSILine{}
- _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
- if err != nil {
- return PSIStats{}, err
- }
- psiStats.Some = &psi
- case "full":
- psi := PSILine{}
- _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
- if err != nil {
- return PSIStats{}, err
- }
- psiStats.Full = &psi
- default:
- // If we encounter a line with an unknown prefix, ignore it and move on
- // Should new measurement types be added in the future we'll simply ignore them instead
- // of erroring on retrieval
- continue
- }
- }
-
- return psiStats, nil
-}
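A sketch of reading CPU pressure through `FS.PSIStatsForResource`; for the cpu resource the "full" line may be absent, so only "some" is consulted:

```go
package procfsutil // hypothetical helper package

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// cpuPressure prints the 10-second "some" CPU pressure average.
func cpuPressure(fs procfs.FS) error {
	stats, err := fs.PSIStatsForResource("cpu")
	if err != nil {
		return err
	}
	if stats.Some == nil {
		return fmt.Errorf("no \"some\" line in cpu pressure file")
	}
	fmt.Printf("cpu some avg10=%.2f%% total=%dµs\n", stats.Some.Avg10, stats.Some.Total)
	return nil
}
```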
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
deleted file mode 100644
index 09060e820..000000000
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "fmt"
- "os"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- // match the header line before each mapped zone in `/proc/pid/smaps`.
- procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
-)
-
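-// ProcSMapsRollup models memory information summed over all mappings of a
-// process, as read from /proc/[pid]/smaps_rollup or computed from
-// /proc/[pid]/smaps on older kernels.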
-type ProcSMapsRollup struct {
- // Amount of the mapping that is currently resident in RAM.
- Rss uint64
- // Process's proportional share of this mapping.
- Pss uint64
- // Size in bytes of clean shared pages.
- SharedClean uint64
- // Size in bytes of dirty shared pages.
- SharedDirty uint64
- // Size in bytes of clean private pages.
- PrivateClean uint64
- // Size in bytes of dirty private pages.
- PrivateDirty uint64
- // Amount of memory currently marked as referenced or accessed.
- Referenced uint64
- // Amount of memory that does not belong to any file.
- Anonymous uint64
- // Amount of would-be-anonymous memory currently on swap.
- Swap uint64
- // Process's proportional memory on swap.
- SwapPss uint64
-}
-
-// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
-// process.
-//
-// If smaps_rollup does not exist (requires kernel >= 4.15), the content of
-// /proc/[pid]/smaps is read and summed instead.
-func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
- data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
- if err != nil && os.IsNotExist(err) {
- return p.procSMapsRollupManual()
- }
- if err != nil {
- return ProcSMapsRollup{}, err
- }
-
- lines := strings.Split(string(data), "\n")
- smaps := ProcSMapsRollup{}
-
- // Skip the first line, which doesn't contain information we need.
- lines = lines[1:]
- for _, line := range lines {
- if line == "" {
- continue
- }
-
- if err := smaps.parseLine(line); err != nil {
- return ProcSMapsRollup{}, err
- }
- }
-
- return smaps, nil
-}
-
-// Read /proc/pid/smaps and do the roll-up in Go code.
-func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
- file, err := os.Open(p.path("smaps"))
- if err != nil {
- return ProcSMapsRollup{}, err
- }
- defer file.Close()
-
- smaps := ProcSMapsRollup{}
- scan := bufio.NewScanner(file)
-
- for scan.Scan() {
- line := scan.Text()
-
- if procSMapsHeaderLine.MatchString(line) {
- continue
- }
-
- if err := smaps.parseLine(line); err != nil {
- return ProcSMapsRollup{}, err
- }
- }
-
- return smaps, nil
-}
-
-func (s *ProcSMapsRollup) parseLine(line string) error {
- kv := strings.SplitN(line, ":", 2)
- if len(kv) != 2 {
- return fmt.Errorf("%w: invalid smaps line %q, missing colon", ErrFileParse, line)
- }
-
- k := kv[0]
- if k == "VmFlags" {
- return nil
- }
-
- v := strings.TrimSpace(kv[1])
- v = strings.TrimSuffix(v, " kB")
-
- vKBytes, err := strconv.ParseUint(v, 10, 64)
- if err != nil {
- return err
- }
- vBytes := vKBytes * 1024
-
- s.addValue(k, vBytes)
-
- return nil
-}
-
-func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
- switch k {
- case "Rss":
- s.Rss += vUintBytes
- case "Pss":
- s.Pss += vUintBytes
- case "Shared_Clean":
- s.SharedClean += vUintBytes
- case "Shared_Dirty":
- s.SharedDirty += vUintBytes
- case "Private_Clean":
- s.PrivateClean += vUintBytes
- case "Private_Dirty":
- s.PrivateDirty += vUintBytes
- case "Referenced":
- s.Referenced += vUintBytes
- case "Anonymous":
- s.Anonymous += vUintBytes
- case "Swap":
- s.Swap += vUintBytes
- case "SwapPss":
- s.SwapPss += vUintBytes
- }
-}
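A sketch of reporting proportional set size via `ProcSMapsRollup`; values are stored in bytes after `parseLine` multiplies the kernel's kB figures by 1024 (hypothetical helper):

```go
package procfsutil // hypothetical helper package

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printPss reports the proportional and resident set sizes in kilobytes.
// On kernels without smaps_rollup the library falls back to summing smaps.
func printPss(p procfs.Proc) error {
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		return err
	}
	fmt.Printf("pid %d: pss=%d kB rss=%d kB\n", p.PID, rollup.Pss/1024, rollup.Rss/1024)
	return nil
}
```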
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
deleted file mode 100644
index b9d2cf642..000000000
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcSnmp models the content of /proc/[pid]/net/snmp.
-type ProcSnmp struct {
- // The process ID.
- PID int
- Ip
- Icmp
- IcmpMsg
- Tcp
- Udp
- UdpLite
-}
-
-type Ip struct { // nolint:revive
- Forwarding *float64
- DefaultTTL *float64
- InReceives *float64
- InHdrErrors *float64
- InAddrErrors *float64
- ForwDatagrams *float64
- InUnknownProtos *float64
- InDiscards *float64
- InDelivers *float64
- OutRequests *float64
- OutDiscards *float64
- OutNoRoutes *float64
- ReasmTimeout *float64
- ReasmReqds *float64
- ReasmOKs *float64
- ReasmFails *float64
- FragOKs *float64
- FragFails *float64
- FragCreates *float64
-}
-
-type Icmp struct { // nolint:revive
- InMsgs *float64
- InErrors *float64
- InCsumErrors *float64
- InDestUnreachs *float64
- InTimeExcds *float64
- InParmProbs *float64
- InSrcQuenchs *float64
- InRedirects *float64
- InEchos *float64
- InEchoReps *float64
- InTimestamps *float64
- InTimestampReps *float64
- InAddrMasks *float64
- InAddrMaskReps *float64
- OutMsgs *float64
- OutErrors *float64
- OutDestUnreachs *float64
- OutTimeExcds *float64
- OutParmProbs *float64
- OutSrcQuenchs *float64
- OutRedirects *float64
- OutEchos *float64
- OutEchoReps *float64
- OutTimestamps *float64
- OutTimestampReps *float64
- OutAddrMasks *float64
- OutAddrMaskReps *float64
-}
-
-type IcmpMsg struct {
- InType3 *float64
- OutType3 *float64
-}
-
-type Tcp struct { // nolint:revive
- RtoAlgorithm *float64
- RtoMin *float64
- RtoMax *float64
- MaxConn *float64
- ActiveOpens *float64
- PassiveOpens *float64
- AttemptFails *float64
- EstabResets *float64
- CurrEstab *float64
- InSegs *float64
- OutSegs *float64
- RetransSegs *float64
- InErrs *float64
- OutRsts *float64
- InCsumErrors *float64
-}
-
-type Udp struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
- IgnoredMulti *float64
-}
-
-type UdpLite struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
- IgnoredMulti *float64
-}
-
-func (p Proc) Snmp() (ProcSnmp, error) {
- filename := p.path("net/snmp")
- data, err := util.ReadFileNoStat(filename)
- if err != nil {
- return ProcSnmp{PID: p.PID}, err
- }
- procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
- procSnmp.PID = p.PID
- return procSnmp, err
-}
-
-// parseSnmp parses the metrics from a /proc/<pid>/net/snmp file and returns
-// a ProcSnmp holding those metrics (e.g. Ip.Forwarding = 2).
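-// The file consists of pairs of lines: a header line naming the fields,
-// followed by a line of values, for example:
-//
-//	Ip: Forwarding DefaultTTL InReceives InHdrErrors ...
-//	Ip: 2 64 25922988 0 ...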
-func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
- var (
- scanner = bufio.NewScanner(r)
- procSnmp = ProcSnmp{}
- )
-
- for scanner.Scan() {
- nameParts := strings.Split(scanner.Text(), " ")
- scanner.Scan()
- valueParts := strings.Split(scanner.Text(), " ")
- // Remove trailing :.
- protocol := strings.TrimSuffix(nameParts[0], ":")
- if len(nameParts) != len(valueParts) {
-			return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
- ErrFileParse, fileName, protocol)
- }
- for i := 1; i < len(nameParts); i++ {
- value, err := strconv.ParseFloat(valueParts[i], 64)
- if err != nil {
- return procSnmp, err
- }
- key := nameParts[i]
-
- switch protocol {
- case "Ip":
- switch key {
- case "Forwarding":
- procSnmp.Ip.Forwarding = &value
- case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = &value
- case "InReceives":
- procSnmp.Ip.InReceives = &value
- case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = &value
- case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = &value
- case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = &value
- case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = &value
- case "InDiscards":
- procSnmp.Ip.InDiscards = &value
- case "InDelivers":
- procSnmp.Ip.InDelivers = &value
- case "OutRequests":
- procSnmp.Ip.OutRequests = &value
- case "OutDiscards":
- procSnmp.Ip.OutDiscards = &value
- case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = &value
- case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = &value
- case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = &value
- case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = &value
- case "ReasmFails":
- procSnmp.Ip.ReasmFails = &value
- case "FragOKs":
- procSnmp.Ip.FragOKs = &value
- case "FragFails":
- procSnmp.Ip.FragFails = &value
- case "FragCreates":
- procSnmp.Ip.FragCreates = &value
- }
- case "Icmp":
- switch key {
- case "InMsgs":
- procSnmp.Icmp.InMsgs = &value
- case "InErrors":
- procSnmp.Icmp.InErrors = &value
- case "InCsumErrors":
- procSnmp.Icmp.InCsumErrors = &value
- case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = &value
- case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = &value
- case "InParmProbs":
- procSnmp.Icmp.InParmProbs = &value
- case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = &value
- case "InRedirects":
- procSnmp.Icmp.InRedirects = &value
- case "InEchos":
- procSnmp.Icmp.InEchos = &value
- case "InEchoReps":
- procSnmp.Icmp.InEchoReps = &value
- case "InTimestamps":
- procSnmp.Icmp.InTimestamps = &value
- case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = &value
- case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = &value
- case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = &value
- case "OutMsgs":
- procSnmp.Icmp.OutMsgs = &value
- case "OutErrors":
- procSnmp.Icmp.OutErrors = &value
- case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = &value
- case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = &value
- case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = &value
- case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = &value
- case "OutRedirects":
- procSnmp.Icmp.OutRedirects = &value
- case "OutEchos":
- procSnmp.Icmp.OutEchos = &value
- case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = &value
- case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = &value
- case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = &value
- case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = &value
- case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = &value
- }
- case "IcmpMsg":
- switch key {
- case "InType3":
- procSnmp.IcmpMsg.InType3 = &value
- case "OutType3":
- procSnmp.IcmpMsg.OutType3 = &value
- }
- case "Tcp":
- switch key {
- case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = &value
- case "RtoMin":
- procSnmp.Tcp.RtoMin = &value
- case "RtoMax":
- procSnmp.Tcp.RtoMax = &value
- case "MaxConn":
- procSnmp.Tcp.MaxConn = &value
- case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = &value
- case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = &value
- case "AttemptFails":
- procSnmp.Tcp.AttemptFails = &value
- case "EstabResets":
- procSnmp.Tcp.EstabResets = &value
- case "CurrEstab":
- procSnmp.Tcp.CurrEstab = &value
- case "InSegs":
- procSnmp.Tcp.InSegs = &value
- case "OutSegs":
- procSnmp.Tcp.OutSegs = &value
- case "RetransSegs":
- procSnmp.Tcp.RetransSegs = &value
- case "InErrs":
- procSnmp.Tcp.InErrs = &value
- case "OutRsts":
- procSnmp.Tcp.OutRsts = &value
- case "InCsumErrors":
- procSnmp.Tcp.InCsumErrors = &value
- }
- case "Udp":
- switch key {
- case "InDatagrams":
- procSnmp.Udp.InDatagrams = &value
- case "NoPorts":
- procSnmp.Udp.NoPorts = &value
- case "InErrors":
- procSnmp.Udp.InErrors = &value
- case "OutDatagrams":
- procSnmp.Udp.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp.Udp.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp.Udp.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp.Udp.InCsumErrors = &value
- case "IgnoredMulti":
- procSnmp.Udp.IgnoredMulti = &value
- }
- case "UdpLite":
- switch key {
- case "InDatagrams":
- procSnmp.UdpLite.InDatagrams = &value
- case "NoPorts":
- procSnmp.UdpLite.NoPorts = &value
- case "InErrors":
- procSnmp.UdpLite.InErrors = &value
- case "OutDatagrams":
- procSnmp.UdpLite.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp.UdpLite.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp.UdpLite.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp.UdpLite.InCsumErrors = &value
- case "IgnoredMulti":
- procSnmp.UdpLite.IgnoredMulti = &value
- }
- }
- }
- }
- return procSnmp, scanner.Err()
-}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
deleted file mode 100644
index 3059cc6a1..000000000
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
-type ProcSnmp6 struct {
- // The process ID.
- PID int
- Ip6
- Icmp6
- Udp6
- UdpLite6
-}
-
-type Ip6 struct { // nolint:revive
- InReceives *float64
- InHdrErrors *float64
- InTooBigErrors *float64
- InNoRoutes *float64
- InAddrErrors *float64
- InUnknownProtos *float64
- InTruncatedPkts *float64
- InDiscards *float64
- InDelivers *float64
- OutForwDatagrams *float64
- OutRequests *float64
- OutDiscards *float64
- OutNoRoutes *float64
- ReasmTimeout *float64
- ReasmReqds *float64
- ReasmOKs *float64
- ReasmFails *float64
- FragOKs *float64
- FragFails *float64
- FragCreates *float64
- InMcastPkts *float64
- OutMcastPkts *float64
- InOctets *float64
- OutOctets *float64
- InMcastOctets *float64
- OutMcastOctets *float64
- InBcastOctets *float64
- OutBcastOctets *float64
- InNoECTPkts *float64
- InECT1Pkts *float64
- InECT0Pkts *float64
- InCEPkts *float64
-}
-
-type Icmp6 struct {
- InMsgs *float64
- InErrors *float64
- OutMsgs *float64
- OutErrors *float64
- InCsumErrors *float64
- InDestUnreachs *float64
- InPktTooBigs *float64
- InTimeExcds *float64
- InParmProblems *float64
- InEchos *float64
- InEchoReplies *float64
- InGroupMembQueries *float64
- InGroupMembResponses *float64
- InGroupMembReductions *float64
- InRouterSolicits *float64
- InRouterAdvertisements *float64
- InNeighborSolicits *float64
- InNeighborAdvertisements *float64
- InRedirects *float64
- InMLDv2Reports *float64
- OutDestUnreachs *float64
- OutPktTooBigs *float64
- OutTimeExcds *float64
- OutParmProblems *float64
- OutEchos *float64
- OutEchoReplies *float64
- OutGroupMembQueries *float64
- OutGroupMembResponses *float64
- OutGroupMembReductions *float64
- OutRouterSolicits *float64
- OutRouterAdvertisements *float64
- OutNeighborSolicits *float64
- OutNeighborAdvertisements *float64
- OutRedirects *float64
- OutMLDv2Reports *float64
- InType1 *float64
- InType134 *float64
- InType135 *float64
- InType136 *float64
- InType143 *float64
- OutType133 *float64
- OutType135 *float64
- OutType136 *float64
- OutType143 *float64
-}
-
-type Udp6 struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
- IgnoredMulti *float64
-}
-
-type UdpLite6 struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
-}
-
-func (p Proc) Snmp6() (ProcSnmp6, error) {
- filename := p.path("net/snmp6")
- data, err := util.ReadFileNoStat(filename)
- if err != nil {
- // On systems with IPv6 disabled, this file won't exist.
- // Do nothing.
- if errors.Is(err, os.ErrNotExist) {
- return ProcSnmp6{PID: p.PID}, nil
- }
-
- return ProcSnmp6{PID: p.PID}, err
- }
-
- procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
- procSnmp6.PID = p.PID
- return procSnmp6, err
-}
-
-// parseSNMP6Stats parses the metrics from a /proc/<pid>/net/snmp6 file and
-// returns a ProcSnmp6 holding those metrics.
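-// Unlike net/snmp, each snmp6 line carries a single "<name> <value>" pair,
-// for example:
-//
-//	Ip6InReceives                   12345
-//	Icmp6InMsgs                     104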
-func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
- var (
- scanner = bufio.NewScanner(r)
- procSnmp6 = ProcSnmp6{}
- )
-
- for scanner.Scan() {
- stat := strings.Fields(scanner.Text())
- if len(stat) < 2 {
- continue
- }
-		// Expect the metric name to contain "6"; skip the line otherwise.
- if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
- protocol := stat[0][:sixIndex+1]
- key := stat[0][sixIndex+1:]
- value, err := strconv.ParseFloat(stat[1], 64)
- if err != nil {
- return procSnmp6, err
- }
-
- switch protocol {
- case "Ip6":
- switch key {
- case "InReceives":
- procSnmp6.Ip6.InReceives = &value
- case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = &value
- case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = &value
- case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = &value
- case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = &value
- case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = &value
- case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = &value
- case "InDiscards":
- procSnmp6.Ip6.InDiscards = &value
- case "InDelivers":
- procSnmp6.Ip6.InDelivers = &value
- case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = &value
- case "OutRequests":
- procSnmp6.Ip6.OutRequests = &value
- case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = &value
- case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = &value
- case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = &value
- case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = &value
- case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = &value
- case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = &value
- case "FragOKs":
- procSnmp6.Ip6.FragOKs = &value
- case "FragFails":
- procSnmp6.Ip6.FragFails = &value
- case "FragCreates":
- procSnmp6.Ip6.FragCreates = &value
- case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = &value
- case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = &value
- case "InOctets":
- procSnmp6.Ip6.InOctets = &value
- case "OutOctets":
- procSnmp6.Ip6.OutOctets = &value
- case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = &value
- case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = &value
- case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = &value
- case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = &value
- case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = &value
- case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = &value
- case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = &value
- case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = &value
- }
- case "Icmp6":
- switch key {
- case "InMsgs":
- procSnmp6.Icmp6.InMsgs = &value
- case "InErrors":
- procSnmp6.Icmp6.InErrors = &value
- case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = &value
- case "OutErrors":
- procSnmp6.Icmp6.OutErrors = &value
- case "InCsumErrors":
- procSnmp6.Icmp6.InCsumErrors = &value
- case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = &value
- case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = &value
- case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = &value
- case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = &value
- case "InEchos":
- procSnmp6.Icmp6.InEchos = &value
- case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = &value
- case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = &value
- case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = &value
- case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = &value
- case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = &value
- case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = &value
- case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = &value
- case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = &value
- case "InRedirects":
- procSnmp6.Icmp6.InRedirects = &value
- case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = &value
- case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = &value
- case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = &value
- case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = &value
- case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = &value
- case "OutEchos":
- procSnmp6.Icmp6.OutEchos = &value
- case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = &value
- case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = &value
- case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = &value
- case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = &value
- case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = &value
- case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = &value
- case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = &value
- case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = &value
- case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = &value
- case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = &value
- case "InType1":
- procSnmp6.Icmp6.InType1 = &value
- case "InType134":
- procSnmp6.Icmp6.InType134 = &value
- case "InType135":
- procSnmp6.Icmp6.InType135 = &value
- case "InType136":
- procSnmp6.Icmp6.InType136 = &value
- case "InType143":
- procSnmp6.Icmp6.InType143 = &value
- case "OutType133":
- procSnmp6.Icmp6.OutType133 = &value
- case "OutType135":
- procSnmp6.Icmp6.OutType135 = &value
- case "OutType136":
- procSnmp6.Icmp6.OutType136 = &value
- case "OutType143":
- procSnmp6.Icmp6.OutType143 = &value
- }
- case "Udp6":
- switch key {
- case "InDatagrams":
- procSnmp6.Udp6.InDatagrams = &value
- case "NoPorts":
- procSnmp6.Udp6.NoPorts = &value
- case "InErrors":
- procSnmp6.Udp6.InErrors = &value
- case "OutDatagrams":
- procSnmp6.Udp6.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp6.Udp6.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp6.Udp6.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp6.Udp6.InCsumErrors = &value
- case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = &value
- }
- case "UdpLite6":
- switch key {
- case "InDatagrams":
- procSnmp6.UdpLite6.InDatagrams = &value
- case "NoPorts":
- procSnmp6.UdpLite6.NoPorts = &value
- case "InErrors":
- procSnmp6.UdpLite6.InErrors = &value
- case "OutDatagrams":
- procSnmp6.UdpLite6.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp6.UdpLite6.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp6.UdpLite6.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp6.UdpLite6.InCsumErrors = &value
- }
- }
- }
- }
- return procSnmp6, scanner.Err()
-}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
deleted file mode 100644
index 06a8d931c..000000000
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "os"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
-// which required cgo. However, that caused a lot of problems regarding
-// cross-compilation. Alternatives such as running a binary to determine the
-// value, or trying to derive it in some other way were all problematic. After
-// much research it was determined that USER_HZ is actually hardcoded to 100 on
-// all Go-supported platforms as of the time of this writing. This is why we
-// decided to hardcode it here as well. It is not impossible that there could
-// be systems with exceptions, but they should be very exotic edge cases, and
-// in that case, the worst outcome will be two misreported metrics.
-//
-// See also the following discussions:
-//
-// - https://github.com/prometheus/node_exporter/issues/52
-// - https://github.com/prometheus/procfs/pull/2
-// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
-const userHZ = 100
-
-// ProcStat provides status information about the process,
-// read from /proc/[pid]/stat.
-type ProcStat struct {
- // The process ID.
- PID int
- // The filename of the executable.
- Comm string
- // The process state.
- State string
- // The PID of the parent of this process.
- PPID int
- // The process group ID of the process.
- PGRP int
- // The session ID of the process.
- Session int
- // The controlling terminal of the process.
- TTY int
- // The ID of the foreground process group of the controlling terminal of
- // the process.
- TPGID int
- // The kernel flags word of the process.
- Flags uint
- // The number of minor faults the process has made which have not required
- // loading a memory page from disk.
- MinFlt uint
- // The number of minor faults that the process's waited-for children have
- // made.
- CMinFlt uint
- // The number of major faults the process has made which have required
- // loading a memory page from disk.
- MajFlt uint
- // The number of major faults that the process's waited-for children have
- // made.
- CMajFlt uint
- // Amount of time that this process has been scheduled in user mode,
- // measured in clock ticks.
- UTime uint
- // Amount of time that this process has been scheduled in kernel mode,
- // measured in clock ticks.
- STime uint
- // Amount of time that this process's waited-for children have been
- // scheduled in user mode, measured in clock ticks.
- CUTime int
- // Amount of time that this process's waited-for children have been
- // scheduled in kernel mode, measured in clock ticks.
- CSTime int
- // For processes running a real-time scheduling policy, this is the negated
- // scheduling priority, minus one.
- Priority int
- // The nice value, a value in the range 19 (low priority) to -20 (high
- // priority).
- Nice int
- // Number of threads in this process.
- NumThreads int
- // The time the process started after system boot, the value is expressed
- // in clock ticks.
- Starttime uint64
- // Virtual memory size in bytes.
- VSize uint
- // Resident set size in pages.
- RSS int
- // Soft limit in bytes on the rss of the process.
- RSSLimit uint64
- // CPU number last executed on.
- Processor uint
- // Real-time scheduling priority, a number in the range 1 to 99 for processes
- // scheduled under a real-time policy, or 0, for non-real-time processes.
- RTPriority uint
- // Scheduling policy.
- Policy uint
- // Aggregated block I/O delays, measured in clock ticks (centiseconds).
- DelayAcctBlkIOTicks uint64
- // Guest time of the process (time spent running a virtual CPU for a guest
- // operating system), measured in clock ticks.
- GuestTime int
- // Guest time of the process's children, measured in clock ticks.
- CGuestTime int
-
- proc FS
-}
-
-// NewStat returns the current status information of the process.
-//
-// Deprecated: Use p.Stat() instead.
-func (p Proc) NewStat() (ProcStat, error) {
- return p.Stat()
-}
-
-// Stat returns the current status information of the process.
-func (p Proc) Stat() (ProcStat, error) {
- data, err := util.ReadFileNoStat(p.path("stat"))
- if err != nil {
- return ProcStat{}, err
- }
-
- var (
- ignoreInt64 int64
- ignoreUint64 uint64
-
- s = ProcStat{PID: p.PID, proc: p.fs}
- l = bytes.Index(data, []byte("("))
- r = bytes.LastIndex(data, []byte(")"))
- )
-
- if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
- }
-
- s.Comm = string(data[l+1 : r])
-
- // Check the following resources for the details about the particular stat
- // fields and their data types:
- // * https://man7.org/linux/man-pages/man5/proc.5.html
- // * https://man7.org/linux/man-pages/man3/scanf.3.html
- _, err = fmt.Fscan(
- bytes.NewBuffer(data[r+2:]),
- &s.State,
- &s.PPID,
- &s.PGRP,
- &s.Session,
- &s.TTY,
- &s.TPGID,
- &s.Flags,
- &s.MinFlt,
- &s.CMinFlt,
- &s.MajFlt,
- &s.CMajFlt,
- &s.UTime,
- &s.STime,
- &s.CUTime,
- &s.CSTime,
- &s.Priority,
- &s.Nice,
- &s.NumThreads,
- &ignoreInt64,
- &s.Starttime,
- &s.VSize,
- &s.RSS,
- &s.RSSLimit,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreInt64,
- &s.Processor,
- &s.RTPriority,
- &s.Policy,
- &s.DelayAcctBlkIOTicks,
- &s.GuestTime,
- &s.CGuestTime,
- )
- if err != nil {
- return ProcStat{}, err
- }
-
- return s, nil
-}
-
-// VirtualMemory returns the virtual memory size in bytes.
-func (s ProcStat) VirtualMemory() uint {
- return s.VSize
-}
-
-// ResidentMemory returns the resident memory size in bytes.
-func (s ProcStat) ResidentMemory() int {
- return s.RSS * os.Getpagesize()
-}
-
-// StartTime returns the unix timestamp of the process in seconds.
-func (s ProcStat) StartTime() (float64, error) {
- stat, err := s.proc.Stat()
- if err != nil {
- return 0, err
- }
- return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
-}
-
-// CPUTime returns the total CPU user and system time in seconds.
-func (s ProcStat) CPUTime() float64 {
- return float64(s.UTime+s.STime) / userHZ
-}
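-
-// A minimal usage sketch (assuming a procfs mounted at the default /proc):
-//
-//	p, _ := procfs.Self()
-//	stat, _ := p.Stat()
-//	fmt.Printf("rss=%d bytes, cpu=%.2fs\n", stat.ResidentMemory(), stat.CPUTime())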
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
deleted file mode 100644
index a055197c6..000000000
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "math/bits"
- "sort"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcStatus provides status information about the process,
-// read from /proc/[pid]/status.
-type ProcStatus struct {
- // The process ID.
- PID int
- // The process name.
- Name string
-
- // Thread group ID.
- TGID int
-	// The PIDs of the process in each PID namespace it belongs to.
- NSpids []uint64
-
- // Peak virtual memory size.
- VmPeak uint64 // nolint:revive
- // Virtual memory size.
- VmSize uint64 // nolint:revive
- // Locked memory size.
- VmLck uint64 // nolint:revive
- // Pinned memory size.
- VmPin uint64 // nolint:revive
- // Peak resident set size.
- VmHWM uint64 // nolint:revive
-	// Resident set size (sum of RssAnon, RssFile, and RssShmem).
- VmRSS uint64 // nolint:revive
- // Size of resident anonymous memory.
- RssAnon uint64 // nolint:revive
- // Size of resident file mappings.
- RssFile uint64 // nolint:revive
- // Size of resident shared memory.
- RssShmem uint64 // nolint:revive
- // Size of data segments.
- VmData uint64 // nolint:revive
- // Size of stack segments.
- VmStk uint64 // nolint:revive
- // Size of text segments.
- VmExe uint64 // nolint:revive
- // Shared library code size.
- VmLib uint64 // nolint:revive
- // Page table entries size.
- VmPTE uint64 // nolint:revive
- // Size of second-level page tables.
- VmPMD uint64 // nolint:revive
- // Swapped-out virtual memory size by anonymous private.
- VmSwap uint64 // nolint:revive
- // Size of hugetlb memory portions
- HugetlbPages uint64
-
- // Number of voluntary context switches.
- VoluntaryCtxtSwitches uint64
- // Number of involuntary context switches.
- NonVoluntaryCtxtSwitches uint64
-
- // UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]uint64
- // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]uint64
-
-	// CpusAllowedList: List of cpu cores the process is allowed to run on.
- CpusAllowedList []uint64
-}
-
-// NewStatus returns the current status information of the process.
-func (p Proc) NewStatus() (ProcStatus, error) {
- data, err := util.ReadFileNoStat(p.path("status"))
- if err != nil {
- return ProcStatus{}, err
- }
-
- s := ProcStatus{PID: p.PID}
-
- lines := strings.Split(string(data), "\n")
- for _, line := range lines {
-		if !strings.Contains(line, ":") {
- continue
- }
-
- kv := strings.SplitN(line, ":", 2)
-
- // removes spaces
- k := strings.TrimSpace(kv[0])
- v := strings.TrimSpace(kv[1])
- // removes "kB"
- v = strings.TrimSuffix(v, " kB")
-
-		// Convert the value to an integer when possible. The error check can be
-		// skipped because vKBytes is not used when the value is a string.
- vKBytes, _ := strconv.ParseUint(v, 10, 64)
- // convert kB to B
- vBytes := vKBytes * 1024
-
- err = s.fillStatus(k, v, vKBytes, vBytes)
- if err != nil {
- return ProcStatus{}, err
- }
- }
-
- return s, nil
-}
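-
-// A short usage sketch, again assuming the default /proc mount:
-//
-//	p, _ := procfs.Self()
-//	status, _ := p.NewStatus()
-//	fmt.Println(status.Name, status.VmRSS, status.TotalCtxtSwitches())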
-
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
- switch k {
- case "Tgid":
- s.TGID = int(vUint)
- case "Name":
- s.Name = vString
- case "Uid":
- var err error
- for i, v := range strings.Split(vString, "\t") {
- s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
- if err != nil {
- return err
- }
- }
- case "Gid":
- var err error
- for i, v := range strings.Split(vString, "\t") {
- s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
- if err != nil {
- return err
- }
- }
- case "NSpid":
- s.NSpids = calcNSPidsList(vString)
- case "VmPeak":
- s.VmPeak = vUintBytes
- case "VmSize":
- s.VmSize = vUintBytes
- case "VmLck":
- s.VmLck = vUintBytes
- case "VmPin":
- s.VmPin = vUintBytes
- case "VmHWM":
- s.VmHWM = vUintBytes
- case "VmRSS":
- s.VmRSS = vUintBytes
- case "RssAnon":
- s.RssAnon = vUintBytes
- case "RssFile":
- s.RssFile = vUintBytes
- case "RssShmem":
- s.RssShmem = vUintBytes
- case "VmData":
- s.VmData = vUintBytes
- case "VmStk":
- s.VmStk = vUintBytes
- case "VmExe":
- s.VmExe = vUintBytes
- case "VmLib":
- s.VmLib = vUintBytes
- case "VmPTE":
- s.VmPTE = vUintBytes
- case "VmPMD":
- s.VmPMD = vUintBytes
- case "VmSwap":
- s.VmSwap = vUintBytes
- case "HugetlbPages":
- s.HugetlbPages = vUintBytes
- case "voluntary_ctxt_switches":
- s.VoluntaryCtxtSwitches = vUint
- case "nonvoluntary_ctxt_switches":
- s.NonVoluntaryCtxtSwitches = vUint
- case "Cpus_allowed_list":
- s.CpusAllowedList = calcCpusAllowedList(vString)
- }
-
- return nil
-}
-
-// TotalCtxtSwitches returns the total number of context switches.
-func (s ProcStatus) TotalCtxtSwitches() uint64 {
- return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
-}
-
-func calcCpusAllowedList(cpuString string) []uint64 {
- s := strings.Split(cpuString, ",")
-
- var g []uint64
-
- for _, cpu := range s {
- // parse cpu ranges, example: 1-3=[1,2,3]
- if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
- startCPU, _ := strconv.ParseUint(l[0], 10, 64)
- endCPU, _ := strconv.ParseUint(l[1], 10, 64)
-
- for i := startCPU; i <= endCPU; i++ {
- g = append(g, i)
- }
- } else if len(l) == 1 {
- cpu, _ := strconv.ParseUint(l[0], 10, 64)
- g = append(g, cpu)
- }
-
- }
-
- sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
- return g
-}
-
-func calcNSPidsList(nspidsString string) []uint64 {
- s := strings.Split(nspidsString, " ")
- var nspids []uint64
-
- for _, nspid := range s {
- nspid, _ := strconv.ParseUint(nspid, 10, 64)
- if nspid == 0 {
- continue
- }
- nspids = append(nspids, nspid)
- }
-
- return nspids
-}
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
deleted file mode 100644
index 5eefbe2ef..000000000
--- a/vendor/github.com/prometheus/procfs/proc_sys.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-func sysctlToPath(sysctl string) string {
- return strings.Replace(sysctl, ".", "/", -1)
-}
-
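-// SysctlStrings returns the value of the named sysctl, read from the
-// corresponding file under /proc/sys, split into whitespace-separated fields.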
-func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
- value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
- if err != nil {
- return nil, err
- }
-	return strings.Fields(value), nil
-}
-
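-// SysctlInts returns the fields of the named sysctl value parsed as integers.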
-func (fs FS) SysctlInts(sysctl string) ([]int, error) {
- fields, err := fs.SysctlStrings(sysctl)
- if err != nil {
- return nil, err
- }
-
- values := make([]int, len(fields))
- for i, f := range fields {
- vp := util.NewValueParser(f)
- values[i] = vp.Int()
- if err := vp.Err(); err != nil {
- return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
- }
- }
- return values, nil
-}
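-
-// A minimal usage sketch; net.ipv4.tcp_rmem typically holds three integers
-// (min, default, max), which suits SysctlInts:
-//
-//	fs, _ := procfs.NewDefaultFS()
-//	rmem, _ := fs.SysctlInts("net.ipv4.tcp_rmem")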
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
deleted file mode 100644
index 5f7f32dc8..000000000
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "os"
- "regexp"
- "strconv"
-)
-
-var (
- cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
- procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
-)
-
-// Schedstat contains scheduler statistics from /proc/schedstat
-//
-// See
-// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
-// for a detailed description of what these numbers mean.
-//
-// Note the current kernel documentation claims some of the time units are in
-// jiffies when they are actually in nanoseconds since 2.6.23 with the
-// introduction of CFS. A fix to the documentation is pending. See
-// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
-type Schedstat struct {
- CPUs []*SchedstatCPU
-}
-
-// SchedstatCPU contains the values from one "cpu" line.
-type SchedstatCPU struct {
- CPUNum string
-
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
-type ProcSchedstat struct {
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// Schedstat reads data from `/proc/schedstat`.
-func (fs FS) Schedstat() (*Schedstat, error) {
- file, err := os.Open(fs.proc.Path("schedstat"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- stats := &Schedstat{}
- scanner := bufio.NewScanner(file)
-
- for scanner.Scan() {
- match := cpuLineRE.FindStringSubmatch(scanner.Text())
- if match != nil {
- cpu := &SchedstatCPU{}
- cpu.CPUNum = match[1]
-
- cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
- if err != nil {
- continue
- }
-
- stats.CPUs = append(stats.CPUs, cpu)
- }
- }
-
- return stats, nil
-}
-
-func parseProcSchedstat(contents string) (ProcSchedstat, error) {
- var (
- stats ProcSchedstat
- err error
- )
- match := procLineRE.FindStringSubmatch(contents)
-
- if match != nil {
- stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
- if err != nil {
- return stats, err
- }
-
- stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
- if err != nil {
- return stats, err
- }
-
- stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
- return stats, err
- }
-
- return stats, errors.New("could not parse schedstat")
-}
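-
-// For reference, /proc/<pid>/schedstat holds exactly three numbers, e.g.
-//
-//	411605849 93680043 79
-//
-// i.e. time spent on the cpu (ns), runqueue wait time (ns), and the number of
-// timeslices run on this cpu, matching the three fields parsed above.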
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
deleted file mode 100644
index 8611c9017..000000000
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- slabSpace = regexp.MustCompile(`\s+`)
- slabVer = regexp.MustCompile(`slabinfo -`)
- slabHeader = regexp.MustCompile(`# name`)
-)
-
-// Slab represents a slab pool in the kernel.
-type Slab struct {
- Name string
- ObjActive int64
- ObjNum int64
- ObjSize int64
- ObjPerSlab int64
- PagesPerSlab int64
- // tunables
- Limit int64
- Batch int64
- SharedFactor int64
- SlabActive int64
- SlabNum int64
- SharedAvail int64
-}
-
-// SlabInfo represents info for all slabs.
-type SlabInfo struct {
- Slabs []*Slab
-}
-
-func shouldParseSlab(line string) bool {
- if slabVer.MatchString(line) {
- return false
- }
- if slabHeader.MatchString(line) {
- return false
- }
- return true
-}
-
-// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
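-// A slabinfo 2.1 line has 16 whitespace-separated fields once runs of spaces
-// are collapsed, e.g.
-//
-//	kmalloc-64 4096 4608 64 64 1 : tunables 120 60 8 : slabdata 72 72 0
-//
-// which is why the statistics are read from fixed indices below.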
-func parseV21SlabEntry(line string) (*Slab, error) {
- // First cleanup whitespace.
- l := slabSpace.ReplaceAllString(line, " ")
- s := strings.Split(l, " ")
- if len(s) != 16 {
- return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
- }
- var err error
- i := &Slab{Name: s[0]}
- i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
- if err != nil {
- return nil, err
- }
- i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
- if err != nil {
- return nil, err
- }
- i.Limit, err = strconv.ParseInt(s[8], 10, 64)
- if err != nil {
- return nil, err
- }
- i.Batch, err = strconv.ParseInt(s[9], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
- if err != nil {
- return nil, err
- }
- return i, nil
-}
-
-// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
-func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
- scanner := bufio.NewScanner(r)
- s := SlabInfo{Slabs: []*Slab{}}
- for scanner.Scan() {
- line := scanner.Text()
- if !shouldParseSlab(line) {
- continue
- }
- slab, err := parseV21SlabEntry(line)
- if err != nil {
- return s, err
- }
- s.Slabs = append(s.Slabs, slab)
- }
- return s, nil
-}
-
-// SlabInfo reads data from `/proc/slabinfo`.
-func (fs FS) SlabInfo() (SlabInfo, error) {
- // TODO: Consider passing options to allow for parsing different
-	// slabinfo versions. However, slabinfo 2.1 has been stable since
-	// kernel 2.6.10.
- data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
- if err != nil {
- return SlabInfo{}, err
- }
-
- return parseSlabInfo21(bytes.NewReader(data))
-}
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
deleted file mode 100644
index 28708e074..000000000
--- a/vendor/github.com/prometheus/procfs/softirqs.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Softirqs represents the softirq statistics.
-type Softirqs struct {
- Hi []uint64
- Timer []uint64
- NetTx []uint64
- NetRx []uint64
- Block []uint64
- IRQPoll []uint64
- Tasklet []uint64
- Sched []uint64
- HRTimer []uint64
- RCU []uint64
-}
-
-func (fs FS) Softirqs() (Softirqs, error) {
- fileName := fs.proc.Path("softirqs")
- data, err := util.ReadFileNoStat(fileName)
- if err != nil {
- return Softirqs{}, err
- }
-
- reader := bytes.NewReader(data)
-
- return parseSoftirqs(reader)
-}
-
-func parseSoftirqs(r io.Reader) (Softirqs, error) {
- var (
- softirqs = Softirqs{}
- scanner = bufio.NewScanner(r)
- )
-
- if !scanner.Scan() {
- return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
- }
-
- for scanner.Scan() {
- parts := strings.Fields(scanner.Text())
- var err error
-
- // require at least one cpu
- if len(parts) < 2 {
- continue
- }
- switch {
- case parts[0] == "HI:":
- perCPU := parts[1:]
- softirqs.Hi = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "TIMER:":
- perCPU := parts[1:]
- softirqs.Timer = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "NET_TX:":
- perCPU := parts[1:]
- softirqs.NetTx = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "NET_RX:":
- perCPU := parts[1:]
- softirqs.NetRx = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "BLOCK:":
- perCPU := parts[1:]
- softirqs.Block = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "IRQ_POLL:":
- perCPU := parts[1:]
- softirqs.IRQPoll = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "TASKLET:":
- perCPU := parts[1:]
- softirqs.Tasklet = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "SCHED:":
- perCPU := parts[1:]
- softirqs.Sched = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "HRTIMER:":
- perCPU := parts[1:]
- softirqs.HRTimer = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "RCU:":
- perCPU := parts[1:]
- softirqs.RCU = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
- }
- }
- }
- }
-
- if err := scanner.Err(); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
- }
-
-	return softirqs, nil
-}
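-
-// For reference, /proc/softirqs starts with a CPU header row followed by one
-// row per softirq type, e.g.
-//
-//	                    CPU0       CPU1
-//	          HI:          0          1
-//	       TIMER:     332519     310498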
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
deleted file mode 100644
index e36b41c18..000000000
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/fs"
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CPUStat shows how much time the cpu spent in various states.
-type CPUStat struct {
- User float64
- Nice float64
- System float64
- Idle float64
- Iowait float64
- IRQ float64
- SoftIRQ float64
- Steal float64
- Guest float64
- GuestNice float64
-}
-
-// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
-// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
-// It is possible to get per-cpu stats by reading `/proc/softirqs`.
-type SoftIRQStat struct {
- Hi uint64
- Timer uint64
- NetTx uint64
- NetRx uint64
- Block uint64
- BlockIoPoll uint64
- Tasklet uint64
- Sched uint64
- Hrtimer uint64
- Rcu uint64
-}
-
-// Stat represents kernel/system statistics.
-type Stat struct {
- // Boot time in seconds since the Epoch.
- BootTime uint64
- // Summed up cpu statistics.
- CPUTotal CPUStat
- // Per-CPU statistics.
- CPU map[int64]CPUStat
- // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
- IRQTotal uint64
- // Number of times a numbered IRQ was triggered.
- IRQ []uint64
- // Number of times a context switch happened.
- ContextSwitches uint64
- // Number of times a process was created.
- ProcessCreated uint64
- // Number of processes currently running.
- ProcessesRunning uint64
- // Number of processes currently blocked (waiting for IO).
- ProcessesBlocked uint64
- // Number of times a softirq was scheduled.
- SoftIRQTotal uint64
- // Detailed softirq statistics.
- SoftIRQ SoftIRQStat
-}
-
-// parseCPUStat parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
-func parseCPUStat(line string) (CPUStat, int64, error) {
- cpuStat := CPUStat{}
- var cpu string
-
- count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
- &cpu,
- &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
- &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
- &cpuStat.Guest, &cpuStat.GuestNice)
-
- if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
- }
- if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
- }
-
- cpuStat.User /= userHZ
- cpuStat.Nice /= userHZ
- cpuStat.System /= userHZ
- cpuStat.Idle /= userHZ
- cpuStat.Iowait /= userHZ
- cpuStat.IRQ /= userHZ
- cpuStat.SoftIRQ /= userHZ
- cpuStat.Steal /= userHZ
- cpuStat.Guest /= userHZ
- cpuStat.GuestNice /= userHZ
-
- if cpu == "cpu" {
- return cpuStat, -1, nil
- }
-
- cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
- if err != nil {
- return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
- }
-
- return cpuStat, cpuID, nil
-}
-
-// parseSoftIRQStat parses a softirq line.
-func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
- softIRQStat := SoftIRQStat{}
- var total uint64
- var prefix string
-
- _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
- &prefix, &total,
- &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
- &softIRQStat.Block, &softIRQStat.BlockIoPoll,
- &softIRQStat.Tasklet, &softIRQStat.Sched,
- &softIRQStat.Hrtimer, &softIRQStat.Rcu)
-
- if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
- }
-
- return softIRQStat, total, nil
-}
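-
-// For reference, the softirq line in /proc/stat holds the total followed by
-// the ten per-type counters parsed above, e.g.
-//
-//	softirq 326 27 80 0 3 8 0 0 0 115 93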
-
-// NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-//
-// Deprecated: Use fs.Stat() instead.
-func NewStat() (Stat, error) {
- fs, err := NewFS(fs.DefaultProcMountPoint)
- if err != nil {
- return Stat{}, err
- }
- return fs.Stat()
-}
-
-// NewStat returns information about current cpu/process statistics.
-// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-//
-// Deprecated: Use fs.Stat() instead.
-func (fs FS) NewStat() (Stat, error) {
- return fs.Stat()
-}
-
-// Stat returns information about current cpu/process statistics.
-// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) Stat() (Stat, error) {
- fileName := fs.proc.Path("stat")
- data, err := util.ReadFileNoStat(fileName)
- if err != nil {
- return Stat{}, err
- }
- procStat, err := parseStat(bytes.NewReader(data), fileName)
- if err != nil {
- return Stat{}, err
- }
- return procStat, nil
-}
-
-// parseStat parses the metrics from /proc/stat.
-func parseStat(r io.Reader, fileName string) (Stat, error) {
- var (
- scanner = bufio.NewScanner(r)
- stat = Stat{
- CPU: make(map[int64]CPUStat),
- }
- err error
- )
-
- // Increase default scanner buffer to handle very long `intr` lines.
- buf := make([]byte, 0, 8*1024)
- scanner.Buffer(buf, 1024*1024)
-
- for scanner.Scan() {
-		line := scanner.Text()
-		parts := strings.Fields(line)
-		// require at least <key> <value>
- if len(parts) < 2 {
- continue
- }
- switch {
- case parts[0] == "btime":
- if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "intr":
- if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
- }
- numberedIRQs := parts[2:]
- stat.IRQ = make([]uint64, len(numberedIRQs))
- for i, count := range numberedIRQs {
- if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "ctxt":
- if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "processes":
- if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "procs_running":
- if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "procs_blocked":
- if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "softirq":
- softIRQStats, total, err := parseSoftIRQStat(line)
- if err != nil {
- return Stat{}, err
- }
- stat.SoftIRQTotal = total
- stat.SoftIRQ = softIRQStats
- case strings.HasPrefix(parts[0], "cpu"):
- cpuStat, cpuID, err := parseCPUStat(line)
- if err != nil {
- return Stat{}, err
- }
- if cpuID == -1 {
- stat.CPUTotal = cpuStat
- } else {
- stat.CPU[cpuID] = cpuStat
- }
- }
- }
-
- if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
- }
-
- return stat, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
deleted file mode 100644
index 65fec834b..000000000
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Swap represents an entry in /proc/swaps.
-type Swap struct {
- Filename string
- Type string
- Size int
- Used int
- Priority int
-}
-
-// Swaps returns a slice of all configured swap devices on the system.
-func (fs FS) Swaps() ([]*Swap, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
- if err != nil {
- return nil, err
- }
- return parseSwaps(data)
-}
-
-func parseSwaps(info []byte) ([]*Swap, error) {
- swaps := []*Swap{}
- scanner := bufio.NewScanner(bytes.NewReader(info))
- scanner.Scan() // ignore header line
- for scanner.Scan() {
- swapString := scanner.Text()
- parsedSwap, err := parseSwapString(swapString)
- if err != nil {
- return nil, err
- }
- swaps = append(swaps, parsedSwap)
- }
-
- err := scanner.Err()
- return swaps, err
-}
-
-func parseSwapString(swapString string) (*Swap, error) {
- var err error
-
- swapFields := strings.Fields(swapString)
- swapLength := len(swapFields)
- if swapLength < 5 {
- return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
- }
-
- swap := &Swap{
- Filename: swapFields[0],
- Type: swapFields[1],
- }
-
- swap.Size, err = strconv.Atoi(swapFields[2])
- if err != nil {
- return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
- }
- swap.Used, err = strconv.Atoi(swapFields[3])
- if err != nil {
- return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
- }
- swap.Priority, err = strconv.Atoi(swapFields[4])
- if err != nil {
- return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
- }
-
- return swap, nil
-}
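-
-// For reference, /proc/swaps has a header row followed by one line per swap
-// device, each with the five fields parsed above, e.g.
-//
-//	Filename    Type        Size    Used    Priority
-//	/dev/dm-2   partition   131068  176     -2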
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
deleted file mode 100644
index 80e0e947b..000000000
--- a/vendor/github.com/prometheus/procfs/thread.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "os"
- "strconv"
-
- fsi "github.com/prometheus/procfs/internal/fs"
-)
-
-// Provide access to /proc/PID/task/TID files, for thread specific values. Since
-// such files have the same structure as /proc/PID/ ones, the data structures
-// and the parsers for the latter may be reused.
-
-// AllThreads returns a list of all currently available threads under /proc/PID.
-func AllThreads(pid int) (Procs, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Procs{}, err
- }
- return fs.AllThreads(pid)
-}
-
-// AllThreads returns a list of all currently available threads for PID.
-func (fs FS) AllThreads(pid int) (Procs, error) {
- taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
- d, err := os.Open(taskPath)
- if err != nil {
- return Procs{}, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
- }
-
- t := Procs{}
- for _, n := range names {
- tid, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- continue
- }
-
- t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
- }
-
- return t, nil
-}
-
-// Thread returns a process for a given PID, TID.
-func (fs FS) Thread(pid, tid int) (Proc, error) {
- taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
- if _, err := os.Stat(taskPath); err != nil {
- return Proc{}, err
- }
- return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
-}
-
-// Thread returns a process for a given TID of Proc.
-func (proc Proc) Thread(tid int) (Proc, error) {
- tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
- if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
- return Proc{}, err
- }
- return Proc{PID: tid, fs: tfs}, nil
-}
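
As a quick illustration of the thread API deleted here, the following sketch lists the threads of the current process. Because each thread is returned as a `Proc` rooted at `/proc/PID/task`, per-process parsers such as `Comm` (assumed here to exist on `Proc`, as it does elsewhere in procfs) work on it unchanged:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range threads {
		comm, err := t.Comm() // reads /proc/PID/task/TID/comm
		if err != nil {
			continue
		}
		fmt.Printf("tid=%d comm=%s\n", t.PID, comm)
	}
}
```
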
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
deleted file mode 100644
index 19ef02b8d..000000000
--- a/vendor/github.com/prometheus/procfs/ttar
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/usr/bin/env bash
-
-# Purpose: plain text tar format
-# Limitations: - only suitable for text files, directories, and symlinks
-# - stores only filename, content, and mode
-# - not designed for untrusted input
-#
-# Note: must work with bash version 3.2 (macOS)
-
-# Copyright 2017 Roger Luethi
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o errexit -o nounset
-
-# Sanitize environment (for instance, standard sorting of glob matches)
-export LC_ALL=C
-
-path=""
-CMD=""
-ARG_STRING="$*"
-
-#------------------------------------------------------------------------------
-# Not all sed implementations can work on null bytes. In order to make ttar
-# work out of the box on macOS, use Python as a stream editor.
-
-USE_PYTHON=0
-
-PYTHON_CREATE_FILTER=$(cat << 'PCF'
-#!/usr/bin/env python
-
-import re
-import sys
-
-for line in sys.stdin:
- line = re.sub(r'EOF', r'\EOF', line)
- line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
- line = re.sub('\x00', r'NULLBYTE', line)
- sys.stdout.write(line)
-PCF
-)
-
-PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
-#!/usr/bin/env python
-
-import re
-import sys
-
-for line in sys.stdin:
- line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
- line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
- line = re.sub(r'(?<!\\)EOF', '', line)
- line = re.sub(r'\\EOF', 'EOF', line)
- sys.stdout.write(line)
-PEF
-)
-
-function test_environment {
- if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
- echo "WARNING sed unable to handle null bytes, using Python (slow)."
- if ! which python >/dev/null; then
- echo "ERROR Python not found. Aborting."
- exit 2
- fi
- USE_PYTHON=1
- fi
-}
-
-#------------------------------------------------------------------------------
-
-function usage {
- bname=$(basename "$0")
- cat << USAGE
-Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
- $bname -t -f <ARCHIVE> (list archive contents)
- $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
-
-Options:
- -C <DIR> (change directory)
- -v (verbose)
- --recursive-unlink (recursively delete existing directory if path
- collides with file or directory to extract)
-
-Example: Change to sysfs directory, create ttar file from fixtures directory
- $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
-USAGE
-exit "$1"
-}
-
-function vecho {
- if [ "${VERBOSE:-}" == "yes" ]; then
- echo >&7 "$@"
- fi
-}
-
-function set_cmd {
- if [ -n "$CMD" ]; then
- echo "ERROR: more than one command given"
- echo
- usage 2
- fi
- CMD=$1
-}
-
-unset VERBOSE
-unset RECURSIVE_UNLINK
-
-while getopts :cf:-:htxvC: opt; do
- case $opt in
- c)
- set_cmd "create"
- ;;
- f)
- ARCHIVE=$OPTARG
- ;;
- h)
- usage 0
- ;;
- t)
- set_cmd "list"
- ;;
- x)
- set_cmd "extract"
- ;;
- v)
- VERBOSE=yes
- exec 7>&1
- ;;
- C)
- CDIR=$OPTARG
- ;;
- -)
- case $OPTARG in
- recursive-unlink)
- RECURSIVE_UNLINK="yes"
- ;;
- *)
- echo -e "Error: invalid option -$OPTARG"
- echo
- usage 1
- ;;
- esac
- ;;
- *)
- echo >&2 "ERROR: invalid option -$OPTARG"
- echo
- usage 1
- ;;
- esac
-done
-
-# Remove processed options from arguments
-shift $(( OPTIND - 1 ));
-
-if [ "${CMD:-}" == "" ]; then
- echo >&2 "ERROR: no command given"
- echo
- usage 1
-elif [ "${ARCHIVE:-}" == "" ]; then
- echo >&2 "ERROR: no archive name given"
- echo
- usage 1
-fi
-
-function list {
- local path=""
- local size=0
- local line_no=0
- local ttar_file=$1
- if [ -n "${2:-}" ]; then
- echo >&2 "ERROR: too many arguments."
- echo
- usage 1
- fi
- if [ ! -e "$ttar_file" ]; then
- echo >&2 "ERROR: file not found ($ttar_file)"
- echo
- usage 1
- fi
- while read -r line; do
- line_no=$(( line_no + 1 ))
- if [ $size -gt 0 ]; then
- size=$(( size - 1 ))
- continue
- fi
- if [[ $line =~ ^Path:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
- size=${BASH_REMATCH[1]}
- echo "$path"
- elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- echo "$path/"
- elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
- echo "$path -> ${BASH_REMATCH[1]}"
- fi
- done < "$ttar_file"
-}
-
-function extract {
- local path=""
- local size=0
- local line_no=0
- local ttar_file=$1
- if [ -n "${2:-}" ]; then
- echo >&2 "ERROR: too many arguments."
- echo
- usage 1
- fi
- if [ ! -e "$ttar_file" ]; then
- echo >&2 "ERROR: file not found ($ttar_file)"
- echo
- usage 1
- fi
- while IFS= read -r line; do
- line_no=$(( line_no + 1 ))
- local eof_without_newline
- if [ "$size" -gt 0 ]; then
- if [[ "$line" =~ [^\\]EOF ]]; then
- # An EOF not preceded by a backslash indicates that the line
- # does not end with a newline
- eof_without_newline=1
- else
- eof_without_newline=0
- fi
- # Replace NULLBYTE with null byte if at beginning of line
- # Replace NULLBYTE with null byte unless preceded by backslash
- # Remove one backslash in front of NULLBYTE (if any)
- # Remove EOF unless preceded by backslash
- # Remove one backslash in front of EOF
- if [ $USE_PYTHON -eq 1 ]; then
- echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
- else
- # The repeated pattern makes up for sed's lack of negative
- # lookbehind assertions (for consecutive null bytes).
- echo -n "$line" | \
- sed -e 's/^NULLBYTE/\x0/g;
- s/\([^\\]\)NULLBYTE/\1\x0/g;
- s/\([^\\]\)NULLBYTE/\1\x0/g;
- s/\\NULLBYTE/NULLBYTE/g;
- s/\([^\\]\)EOF/\1/g;
- s/\\EOF/EOF/g;
- ' >> "$path"
- fi
- if [[ "$eof_without_newline" -eq 0 ]]; then
- echo >> "$path"
- fi
- size=$(( size - 1 ))
- continue
- fi
- if [[ $line =~ ^Path:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- if [ -L "$path" ]; then
- rm "$path"
- elif [ -d "$path" ]; then
- if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
- rm -r "$path"
- else
- # Safe because symlinks to directories are dealt with above
- rmdir "$path"
- fi
- elif [ -e "$path" ]; then
- rm "$path"
- fi
- elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
- size=${BASH_REMATCH[1]}
- # Create file even if it is zero-length.
- touch "$path"
- vecho " $path"
- elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
- mode=${BASH_REMATCH[1]}
- chmod "$mode" "$path"
- vecho "$mode"
- elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- mkdir -p "$path"
- vecho " $path/"
- elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
- ln -s "${BASH_REMATCH[1]}" "$path"
- vecho " $path -> ${BASH_REMATCH[1]}"
- elif [[ $line =~ ^# ]]; then
- # Ignore comments between files
- continue
- else
- echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
- exit 1
- fi
- done < "$ttar_file"
-}
-
-function div {
- echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
- "- - - - - -"
-}
-
-function get_mode {
- local mfile=$1
- if [ -z "${STAT_OPTION:-}" ]; then
- if stat -c '%a' "$mfile" >/dev/null 2>&1; then
- # GNU stat
- STAT_OPTION='-c'
- STAT_FORMAT='%a'
- else
- # BSD stat
- STAT_OPTION='-f'
- # Octal output, user/group/other (omit file type, sticky bit)
- STAT_FORMAT='%OLp'
- fi
- fi
- stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
-}
-
-function _create {
- shopt -s nullglob
- local mode
- local eof_without_newline
- while (( "$#" )); do
- file=$1
- if [ -L "$file" ]; then
- echo "Path: $file"
- symlinkTo=$(readlink "$file")
- echo "SymlinkTo: $symlinkTo"
- vecho " $file -> $symlinkTo"
- div
- elif [ -d "$file" ]; then
- # Strip trailing slash (if there is one)
- file=${file%/}
- echo "Directory: $file"
- mode=$(get_mode "$file")
- echo "Mode: $mode"
- vecho "$mode $file/"
- div
- # Find all files and dirs, including hidden/dot files
- for x in "$file/"{*,.[^.]*}; do
- _create "$x"
- done
- elif [ -f "$file" ]; then
- echo "Path: $file"
- lines=$(wc -l "$file"|awk '{print $1}')
- eof_without_newline=0
- if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
- [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
- eof_without_newline=1
- lines=$((lines+1))
- fi
- echo "Lines: $lines"
- # Add backslash in front of EOF
- # Add backslash in front of NULLBYTE
- # Replace null byte with NULLBYTE
- if [ $USE_PYTHON -eq 1 ]; then
- < "$file" python -c "$PYTHON_CREATE_FILTER"
- else
- < "$file" \
- sed 's/EOF/\\EOF/g;
- s/NULLBYTE/\\NULLBYTE/g;
- s/\x0/NULLBYTE/g;
- '
- fi
- if [[ "$eof_without_newline" -eq 1 ]]; then
- # Finish line with EOF to indicate that the original line did
- # not end with a linefeed
- echo "EOF"
- fi
- mode=$(get_mode "$file")
- echo "Mode: $mode"
- vecho "$mode $file"
- div
- else
- echo >&2 "ERROR: file not found ($file in $(pwd))"
- exit 2
- fi
- shift
- done
-}
-
-function create {
- ttar_file=$1
- shift
- if [ -z "${1:-}" ]; then
- echo >&2 "ERROR: missing arguments."
- echo
- usage 1
- fi
- if [ -e "$ttar_file" ]; then
- rm "$ttar_file"
- fi
- exec > "$ttar_file"
- echo "# Archive created by ttar $ARG_STRING"
- _create "$@"
-}
-
-test_environment
-
-if [ -n "${CDIR:-}" ]; then
- if [[ "$ARCHIVE" != /* ]]; then
- # Relative path: preserve the archive's location before changing
- # directory
- ARCHIVE="$(pwd)/$ARCHIVE"
- fi
- cd "$CDIR"
-fi
-
-"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
deleted file mode 100644
index 51c49d89e..000000000
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// The VM interface is described at
-//
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-//
-// Each setting is exposed as a single file.
-// Each file contains one line with a single numerical value, except
-// lowmem_reserve_ratio, which holds an array, and numa_zonelist_order
-// (deprecated), which is a string.
-type VM struct {
- AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
- BlockDump *int64 // /proc/sys/vm/block_dump
- CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
- DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
- DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
- DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
- DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
- DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
- DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
- DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
- DropCaches *int64 // /proc/sys/vm/drop_caches
- ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
- HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
- LaptopMode *int64 // /proc/sys/vm/laptop_mode
- LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
- LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
- MaxMapCount *int64 // /proc/sys/vm/max_map_count
- MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
- MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
- MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
- MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
- MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
- MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
- NrHugepages *int64 // /proc/sys/vm/nr_hugepages
- NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
- NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
- NumaStat *int64 // /proc/sys/vm/numa_stat
- NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
- OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
- OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
- OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
- OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
- OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
- PageCluster *int64 // /proc/sys/vm/page-cluster
- PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
- PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
- StatInterval *int64 // /proc/sys/vm/stat_interval
- Swappiness *int64 // /proc/sys/vm/swappiness
- UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
- VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
- WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
- WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
- ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
-}
-
-// VM reads the VM statistics from the specified `proc` filesystem.
-func (fs FS) VM() (*VM, error) {
- path := fs.proc.Path("sys/vm")
- file, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
- if !file.Mode().IsDir() {
- return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
- }
-
- files, err := os.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
- var vm VM
- for _, f := range files {
- if f.IsDir() {
- continue
- }
-
- name := filepath.Join(path, f.Name())
- // ignore errors on read, as some files in /proc/sys/vm
- // are write-only
- value, err := util.SysReadFile(name)
- if err != nil {
- continue
- }
- vp := util.NewValueParser(value)
-
- switch f.Name() {
- case "admin_reserve_kbytes":
- vm.AdminReserveKbytes = vp.PInt64()
- case "block_dump":
- vm.BlockDump = vp.PInt64()
- case "compact_unevictable_allowed":
- vm.CompactUnevictableAllowed = vp.PInt64()
- case "dirty_background_bytes":
- vm.DirtyBackgroundBytes = vp.PInt64()
- case "dirty_background_ratio":
- vm.DirtyBackgroundRatio = vp.PInt64()
- case "dirty_bytes":
- vm.DirtyBytes = vp.PInt64()
- case "dirty_expire_centisecs":
- vm.DirtyExpireCentisecs = vp.PInt64()
- case "dirty_ratio":
- vm.DirtyRatio = vp.PInt64()
- case "dirtytime_expire_seconds":
- vm.DirtytimeExpireSeconds = vp.PInt64()
- case "dirty_writeback_centisecs":
- vm.DirtyWritebackCentisecs = vp.PInt64()
- case "drop_caches":
- vm.DropCaches = vp.PInt64()
- case "extfrag_threshold":
- vm.ExtfragThreshold = vp.PInt64()
- case "hugetlb_shm_group":
- vm.HugetlbShmGroup = vp.PInt64()
- case "laptop_mode":
- vm.LaptopMode = vp.PInt64()
- case "legacy_va_layout":
- vm.LegacyVaLayout = vp.PInt64()
- case "lowmem_reserve_ratio":
- stringSlice := strings.Fields(value)
- pint64Slice := make([]*int64, 0, len(stringSlice))
- for _, value := range stringSlice {
- vp := util.NewValueParser(value)
- pint64Slice = append(pint64Slice, vp.PInt64())
- }
- vm.LowmemReserveRatio = pint64Slice
- case "max_map_count":
- vm.MaxMapCount = vp.PInt64()
- case "memory_failure_early_kill":
- vm.MemoryFailureEarlyKill = vp.PInt64()
- case "memory_failure_recovery":
- vm.MemoryFailureRecovery = vp.PInt64()
- case "min_free_kbytes":
- vm.MinFreeKbytes = vp.PInt64()
- case "min_slab_ratio":
- vm.MinSlabRatio = vp.PInt64()
- case "min_unmapped_ratio":
- vm.MinUnmappedRatio = vp.PInt64()
- case "mmap_min_addr":
- vm.MmapMinAddr = vp.PInt64()
- case "nr_hugepages":
- vm.NrHugepages = vp.PInt64()
- case "nr_hugepages_mempolicy":
- vm.NrHugepagesMempolicy = vp.PInt64()
- case "nr_overcommit_hugepages":
- vm.NrOvercommitHugepages = vp.PInt64()
- case "numa_stat":
- vm.NumaStat = vp.PInt64()
- case "numa_zonelist_order":
- vm.NumaZonelistOrder = value
- case "oom_dump_tasks":
- vm.OomDumpTasks = vp.PInt64()
- case "oom_kill_allocating_task":
- vm.OomKillAllocatingTask = vp.PInt64()
- case "overcommit_kbytes":
- vm.OvercommitKbytes = vp.PInt64()
- case "overcommit_memory":
- vm.OvercommitMemory = vp.PInt64()
- case "overcommit_ratio":
- vm.OvercommitRatio = vp.PInt64()
- case "page-cluster":
- vm.PageCluster = vp.PInt64()
- case "panic_on_oom":
- vm.PanicOnOom = vp.PInt64()
- case "percpu_pagelist_fraction":
- vm.PercpuPagelistFraction = vp.PInt64()
- case "stat_interval":
- vm.StatInterval = vp.PInt64()
- case "swappiness":
- vm.Swappiness = vp.PInt64()
- case "user_reserve_kbytes":
- vm.UserReserveKbytes = vp.PInt64()
- case "vfs_cache_pressure":
- vm.VfsCachePressure = vp.PInt64()
- case "watermark_boost_factor":
- vm.WatermarkBoostFactor = vp.PInt64()
- case "watermark_scale_factor":
- vm.WatermarkScaleFactor = vp.PInt64()
- case "zone_reclaim_mode":
- vm.ZoneReclaimMode = vp.PInt64()
- }
- if err := vp.Err(); err != nil {
- return nil, err
- }
- }
-
- return &vm, nil
-}
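
A minimal usage sketch for the `VM` accessor deleted above. Most fields are `*int64` precisely so that a missing or unreadable sysctl file (`nil`) is distinguishable from a legitimate zero value:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
	// lowmem_reserve_ratio is the one array-valued setting.
	for i, v := range vm.LowmemReserveRatio {
		if v != nil {
			fmt.Printf("lowmem_reserve_ratio[%d] = %d\n", i, *v)
		}
	}
}
```
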
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
deleted file mode 100644
index e54d94b09..000000000
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "os"
- "regexp"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Zoneinfo holds info parsed from /proc/zoneinfo.
-type Zoneinfo struct {
- Node string
- Zone string
- NrFreePages *int64
- Min *int64
- Low *int64
- High *int64
- Scanned *int64
- Spanned *int64
- Present *int64
- Managed *int64
- NrActiveAnon *int64
- NrInactiveAnon *int64
- NrIsolatedAnon *int64
- NrAnonPages *int64
- NrAnonTransparentHugepages *int64
- NrActiveFile *int64
- NrInactiveFile *int64
- NrIsolatedFile *int64
- NrFilePages *int64
- NrSlabReclaimable *int64
- NrSlabUnreclaimable *int64
- NrMlockStack *int64
- NrKernelStack *int64
- NrMapped *int64
- NrDirty *int64
- NrWriteback *int64
- NrUnevictable *int64
- NrShmem *int64
- NrDirtied *int64
- NrWritten *int64
- NumaHit *int64
- NumaMiss *int64
- NumaForeign *int64
- NumaInterleave *int64
- NumaLocal *int64
- NumaOther *int64
- Protection []*int64
-}
-
-var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
-
-// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
- data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
- if err != nil {
- return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
- }
- zoneinfo, err := parseZoneinfo(data)
- if err != nil {
- return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
- }
- return zoneinfo, nil
-}
-
-func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
-
- zoneinfo := []Zoneinfo{}
-
- zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
- for _, block := range zoneinfoBlocks {
- var zoneinfoElement Zoneinfo
- lines := strings.Split(string(block), "\n")
- for _, line := range lines {
-
- if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
- zoneinfoElement.Node = nodeZone[1]
- zoneinfoElement.Zone = nodeZone[2]
- continue
- }
- if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
- continue
- }
- parts := strings.Fields(strings.TrimSpace(line))
- if len(parts) < 2 {
- continue
- }
- vp := util.NewValueParser(parts[1])
- switch parts[0] {
- case "nr_free_pages":
- zoneinfoElement.NrFreePages = vp.PInt64()
- case "min":
- zoneinfoElement.Min = vp.PInt64()
- case "low":
- zoneinfoElement.Low = vp.PInt64()
- case "high":
- zoneinfoElement.High = vp.PInt64()
- case "scanned":
- zoneinfoElement.Scanned = vp.PInt64()
- case "spanned":
- zoneinfoElement.Spanned = vp.PInt64()
- case "present":
- zoneinfoElement.Present = vp.PInt64()
- case "managed":
- zoneinfoElement.Managed = vp.PInt64()
- case "nr_active_anon":
- zoneinfoElement.NrActiveAnon = vp.PInt64()
- case "nr_inactive_anon":
- zoneinfoElement.NrInactiveAnon = vp.PInt64()
- case "nr_isolated_anon":
- zoneinfoElement.NrIsolatedAnon = vp.PInt64()
- case "nr_anon_pages":
- zoneinfoElement.NrAnonPages = vp.PInt64()
- case "nr_anon_transparent_hugepages":
- zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
- case "nr_active_file":
- zoneinfoElement.NrActiveFile = vp.PInt64()
- case "nr_inactive_file":
- zoneinfoElement.NrInactiveFile = vp.PInt64()
- case "nr_isolated_file":
- zoneinfoElement.NrIsolatedFile = vp.PInt64()
- case "nr_file_pages":
- zoneinfoElement.NrFilePages = vp.PInt64()
- case "nr_slab_reclaimable":
- zoneinfoElement.NrSlabReclaimable = vp.PInt64()
- case "nr_slab_unreclaimable":
- zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
- case "nr_mlock_stack":
- zoneinfoElement.NrMlockStack = vp.PInt64()
- case "nr_kernel_stack":
- zoneinfoElement.NrKernelStack = vp.PInt64()
- case "nr_mapped":
- zoneinfoElement.NrMapped = vp.PInt64()
- case "nr_dirty":
- zoneinfoElement.NrDirty = vp.PInt64()
- case "nr_writeback":
- zoneinfoElement.NrWriteback = vp.PInt64()
- case "nr_unevictable":
- zoneinfoElement.NrUnevictable = vp.PInt64()
- case "nr_shmem":
- zoneinfoElement.NrShmem = vp.PInt64()
- case "nr_dirtied":
- zoneinfoElement.NrDirtied = vp.PInt64()
- case "nr_written":
- zoneinfoElement.NrWritten = vp.PInt64()
- case "numa_hit":
- zoneinfoElement.NumaHit = vp.PInt64()
- case "numa_miss":
- zoneinfoElement.NumaMiss = vp.PInt64()
- case "numa_foreign":
- zoneinfoElement.NumaForeign = vp.PInt64()
- case "numa_interleave":
- zoneinfoElement.NumaInterleave = vp.PInt64()
- case "numa_local":
- zoneinfoElement.NumaLocal = vp.PInt64()
- case "numa_other":
- zoneinfoElement.NumaOther = vp.PInt64()
- case "protection:":
- protectionParts := strings.Split(line, ":")
- protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
- protectionValues = strings.Replace(protectionValues, ")", "", 1)
- protectionValues = strings.TrimSpace(protectionValues)
- protectionStringMap := strings.Split(protectionValues, ", ")
- val, err := util.ParsePInt64s(protectionStringMap)
- if err == nil {
- zoneinfoElement.Protection = val
- }
- }
-
- }
-
- zoneinfo = append(zoneinfo, zoneinfoElement)
- }
- return zoneinfo, nil
-}
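
And a corresponding sketch for `Zoneinfo`, iterating over the per-node, per-zone records parsed from `/proc/zoneinfo`; pointer fields are `nil` when a counter was absent from the input:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		if z.NrFreePages == nil {
			continue
		}
		fmt.Printf("node %s, zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
	}
}
```
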
diff --git a/vendor/github.com/prometheus/statsd_exporter/LICENSE b/vendor/github.com/prometheus/statsd_exporter/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/prometheus/statsd_exporter/NOTICE b/vendor/github.com/prometheus/statsd_exporter/NOTICE
deleted file mode 100644
index 33179a982..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-StatsD-to-Prometheus exporter
-Copyright 2013-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/level/level.go b/vendor/github.com/prometheus/statsd_exporter/pkg/level/level.go
deleted file mode 100644
index 5b03c6d6c..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/level/level.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package level
-
-import (
- "fmt"
-
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
-)
-
-var logLevel = LevelInfo
-
-// A Level is a logging priority. Higher levels are more important.
-type Level int
-
-const (
- // LevelDebug logs are typically voluminous, and are usually disabled in
- // production.
- LevelDebug Level = iota
- // LevelInfo is the default logging priority.
- LevelInfo
- // LevelWarn logs are more important than Info, but don't need individual
- // human review.
- LevelWarn
- // LevelError logs are high-priority. If an application is running smoothly,
- // it shouldn't generate any error-level logs.
- LevelError
-)
-
-var emptyLogger = &EmptyLogger{}
-
-type EmptyLogger struct{}
-
-func (l *EmptyLogger) Log(keyvals ...interface{}) error {
- return nil
-}
-
-// SetLogLevel sets the log level.
-func SetLogLevel(level string) error {
- switch level {
- case "debug":
- logLevel = LevelDebug
- case "info":
- logLevel = LevelInfo
- case "warn":
- logLevel = LevelWarn
- case "error":
- logLevel = LevelError
- default:
- return fmt.Errorf("unrecognized log level %s", level)
- }
- return nil
-}
-
-// Error returns a logger that includes a Key/ErrorValue pair.
-func Error(logger log.Logger) log.Logger {
- if logLevel <= LevelError {
- return level.Error(logger)
- }
- return emptyLogger
-}
-
-// Warn returns a logger that includes a Key/WarnValue pair.
-func Warn(logger log.Logger) log.Logger {
- if logLevel <= LevelWarn {
- return level.Warn(logger)
- }
- return emptyLogger
-}
-
-// Info returns a logger that includes a Key/InfoValue pair.
-func Info(logger log.Logger) log.Logger {
- if logLevel <= LevelInfo {
- return level.Info(logger)
- }
- return emptyLogger
-}
-
-// Debug returns a logger that includes a Key/DebugValue pair.
-func Debug(logger log.Logger) log.Logger {
- if logLevel <= LevelDebug {
- return level.Debug(logger)
- }
- return emptyLogger
-}
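
The level package above gates go-kit loggers behind a package-global threshold. A minimal sketch of the intended call pattern (the logfmt logger is one of several go-kit constructors that would work here):

```go
package main

import (
	"os"

	"github.com/go-kit/log"

	"github.com/prometheus/statsd_exporter/pkg/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	if err := level.SetLogLevel("warn"); err != nil {
		panic(err)
	}

	level.Debug(logger).Log("msg", "suppressed") // below the threshold: no-op logger
	level.Warn(logger).Log("msg", "emitted")     // at or above the threshold
}
```
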
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/action.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/action.go
deleted file mode 100644
index b8c09771b..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/action.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "fmt"
-
-type ActionType string
-
-const (
- ActionTypeMap ActionType = "map"
- ActionTypeDrop ActionType = "drop"
- ActionTypeDefault ActionType = ""
-)
-
-func (t *ActionType) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var v string
-
- if err := unmarshal(&v); err != nil {
- return err
- }
-
- switch ActionType(v) {
- case ActionTypeDrop:
- *t = ActionTypeDrop
- case ActionTypeMap, ActionTypeDefault:
- *t = ActionTypeMap
- default:
- return fmt.Errorf("invalid action type %q", v)
- }
- return nil
-}
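
The custom `UnmarshalYAML` above normalizes the action field: an empty or omitted action decodes to `ActionTypeMap`, and anything other than `map` or `drop` is rejected. A sketch of decoding it with `gopkg.in/yaml.v2`, the library whose `unmarshal func(interface{}) error` convention the method follows:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

func main() {
	var rule struct {
		Action mapper.ActionType `yaml:"action"`
	}
	if err := yaml.Unmarshal([]byte("action: drop\n"), &rule); err != nil {
		panic(err)
	}
	fmt.Println(rule.Action == mapper.ActionTypeDrop) // true
}
```
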
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/escape.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/escape.go
deleted file mode 100644
index f73b16d45..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/escape.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import (
- "strings"
- "unicode/utf8"
-)
-
-// EscapeMetricName replaces invalid characters in the metric name with "_"
-// Valid characters are a-z, A-Z, 0-9, and _
-func EscapeMetricName(metricName string) string {
- metricLen := len(metricName)
- if metricLen == 0 {
- return ""
- }
-
- escaped := false
- var sb strings.Builder
- // If a metric starts with a digit, allocate the memory and prepend an
- // underscore.
- if metricName[0] >= '0' && metricName[0] <= '9' {
- escaped = true
- sb.Grow(metricLen + 1)
- sb.WriteByte('_')
- }
-
- // This is a character replacement method optimized for this limited
- // use case. It is much faster than using a regex.
- offset := 0
-
- var prevChar rune
-
- for i, c := range metricName {
- // Seek forward, skipping valid characters until we find one that needs
- // to be replaced, then add all the characters we've seen so far to the
- // string.Builder.
- if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
- (c >= '0' && c <= '9') || (c == '_') {
- // Character is valid, so skip over it without doing anything.
- } else {
- // Double-dashes are allowed if there is a corresponding mapping.
- // For consistency, double-dashes should also be allowed in the default case.
- if c == '-' && prevChar == '-' {
- offset = i + utf8.RuneLen(c)
- continue
- }
-
- if !escaped {
- // Up until now we've been lazy and avoided actually allocating
- // memory. Unfortunately we've now determined this string needs
- // escaping, so allocate the buffer for the whole string.
- escaped = true
- sb.Grow(metricLen)
- }
- sb.WriteString(metricName[offset:i])
- offset = i + utf8.RuneLen(c)
- sb.WriteByte('_')
- }
-
- prevChar = c
- }
-
- if !escaped {
- // This is the happy path where nothing had to be escaped, so we can
- // avoid doing anything.
- return metricName
- }
-
- if offset < metricLen {
- sb.WriteString(metricName[offset:])
- }
-
- return sb.String()
-}
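
The behavior of `EscapeMetricName` in a nutshell, as a small sketch; the outputs in the comments follow from the rules above (invalid runes become `_`, a leading digit gets an underscore prepended, and valid names are returned without allocating):

```go
package main

import (
	"fmt"

	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

func main() {
	fmt.Println(mapper.EscapeMetricName("foo.bar-baz"))  // foo_bar_baz
	fmt.Println(mapper.EscapeMetricName("9lives"))       // _9lives
	fmt.Println(mapper.EscapeMetricName("already_fine")) // already_fine
}
```
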
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/README.md b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/README.md
deleted file mode 100644
index 9d7da44e7..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/README.md
+++ /dev/null
@@ -1,132 +0,0 @@
-# FSM Mapping
-
-## Overview
-
-This package implements a fast and efficient algorithm for generic glob style
-string matching using a finite state machine (FSM).
-
-### Source Hierarchy
-
-```
- '-- fsm
- '-- dump.go // functionality to dump the FSM to Dot file
- '-- formatter.go // format glob templates using captured * groups
- '-- fsm.go // manipulating and searching of FSM
- '-- minmax.go // min() and max() functions for integers
-```
-
-## FSM Explained
-
-Per [Wikipedia](https://en.wikipedia.org/wiki/Finite-state_machine):
-
-> A finite-state machine (FSM) or finite-state automaton (FSA, plural: automata),
-> finite automaton, or simply a state machine, is a mathematical model of
-> computation. It is an abstract machine that can be in exactly one of a finite
-> number of states at any given time. The FSM can change from one state to
-> another in response to some external inputs; the change from one state to
-> another is called a transition. An FSM is defined by a list of its states, its
-> initial state, and the conditions for each transition.
-
-In our use case, each *state* is one of the substrings produced when the input StatsD metric name is split on `.`.
-
-### Add state to FSM
-
-`func (f *FSM) AddState(match string, matchMetricType string,
-maxPossibleTransitions int, result interface{}) int`
-
-At first, the FSM only contains three states, representing three possible metric types:
-
- ____ [gauge]
- /
- (start)---- [counter]
- \
- '--- [observer]
-
-
-Adding a rule `client.*.request.count` with type `counter` turns the FSM into:
-
-
- ____ [gauge]
- /
- (start)---- [counter] -- [client] -- [*] -- [request] -- [count] -- {R1}
- \
- '--- [observer]
-
-`{R1}` is short for result 1, which is the match result for `client.*.request.count`.
-
-Adding a rule `client.*.*.size` with type `counter` turns the FSM into:
-
- ____ [gauge] __ [request] -- [count] -- {R1}
- / /
- (start)---- [counter] -- [client] -- [*]
- \ \__ [*] -- [size] -- {R2}
- '--- [observer]
-
-
-### Finding a result state in FSM
-
-`func (f *FSM) GetMapping(statsdMetric string, statsdMetricType string)
-(*mappingState, []string)`
-
-For example, when mapping `client.aaa.request.count` with `counter` type in the
-FSM, the `^1` to `^7` symbols indicate how the FSM traverses its tree:
-
-
- ____ [gauge] __ [request] -- [count] -- {R1}
- / / ^5 ^6 ^7
- (start)---- [counter] -- [client] -- [*]
- ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2}
- '--- [observer] ^4
-
-
-To map `client.bbb.request.size`, the FSM has to backtrack:
-
-
- ____ [gauge] __ [request] -- [count] -- {R1}
- / / ^5 ^6
- (start)---- [counter] -- [client] -- [*]
- ^1 \ ^2 ^3 \__ [*] -- [size] -- {R2}
- '--- [observer] ^4
- ^7 ^8 ^9
-
-
-## Debugging
-
-To see all the states of the current FSM, use `func (f *FSM) DumpFSM(w io.Writer)`
-to dump it into a Dot file. The Dot file can then be rendered into an image using:
-
-```shell
-$ dot -Tpng dump.dot > dump.png
-```
-
-In the StatsD exporter, one could use the following:
-
-```shell
-$ statsd_exporter --statsd.mapping-config=statsd.rules --debug.dump-fsm=dump.dot
-$ dot -Tpng dump.dot > dump.png
-```
-
-For example, the following rules:
-
-```yaml
-mappings:
-- match: client.*.request.count
- name: request_count
- match_metric_type: counter
- labels:
- client: $1
-
-- match: client.*.*.size
- name: sizes
- match_metric_type: counter
- labels:
- client: $1
- direction: $2
-```
-
-will be rendered as:
-
-(image: the two rules above rendered as an FSM graph)
-
-The `dot` program is part of [Graphviz](https://www.graphviz.org/) and is
-available in most popular operating systems.
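
To make the README's walkthrough concrete, here is a minimal sketch that builds the two example rules and resolves a metric against them; the result values (`"R1"`, `"R2"`) stand in for whatever mapping object a caller attaches, and `10` is an arbitrary transition-count hint:

```go
package main

import (
	"fmt"

	"github.com/prometheus/statsd_exporter/pkg/mapper/fsm"
)

func main() {
	f := fsm.NewFSM([]string{"counter", "gauge", "observer"}, 10, false)
	f.AddState("client.*.request.count", "counter", 10, "R1")
	f.AddState("client.*.*.size", "counter", 10, "R2")

	state, captures := f.GetMapping("client.aaa.request.count", "counter")
	if state != nil {
		fmt.Println(state.Result) // R1
		fmt.Println(captures[0])  // aaa, the value captured by "*"
	}
}
```
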
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/dump.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/dump.go
deleted file mode 100644
index 35772f9c7..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/dump.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsm
-
-import (
- "fmt"
- "io"
-)
-
-// DumpFSM accepts an io.Writer and writes the current FSM out in dot file format.
-func (f *FSM) DumpFSM(w io.Writer) {
- idx := 0
- states := make(map[int]*mappingState)
- states[idx] = f.root
-
- w.Write([]byte("digraph g {\n"))
- w.Write([]byte("rankdir=LR\n")) // make it vertical
- w.Write([]byte("node [ label=\"\",style=filled,fillcolor=white,shape=circle ]\n")) // remove label of node
-
- for idx < len(states) {
- for field, transition := range states[idx].transitions {
- states[len(states)] = transition
- w.Write([]byte(fmt.Sprintf("%d -> %d [label = \"%s\"];\n", idx, len(states)-1, field)))
- if idx == 0 {
- // color for metric types
- w.Write([]byte(fmt.Sprintf("%d [color=\"#D6B656\",fillcolor=\"#FFF2CC\"];\n", len(states)-1)))
- } else if transition.transitions == nil || len(transition.transitions) == 0 {
- // color for end state
- w.Write([]byte(fmt.Sprintf("%d [color=\"#82B366\",fillcolor=\"#D5E8D4\"];\n", len(states)-1)))
- }
- }
- idx++
- }
- // color for start state
- w.Write([]byte(fmt.Sprintln("0 [color=\"#a94442\",fillcolor=\"#f2dede\"];")))
- w.Write([]byte("}"))
-}
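
A short sketch of the debugging flow this file supports: build an FSM, dump it as dot, then render the file with Graphviz (`dot -Tpng dump.dot > dump.png`). The rule and file name are illustrative:

```go
package main

import (
	"log"
	"os"

	"github.com/prometheus/statsd_exporter/pkg/mapper/fsm"
)

func main() {
	f := fsm.NewFSM([]string{"counter", "gauge", "observer"}, 10, false)
	f.AddState("client.*.request.count", "counter", 10, "R1")

	out, err := os.Create("dump.dot")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	f.DumpFSM(out) // writes the Graphviz digraph described above
}
```
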
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/formatter.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/formatter.go
deleted file mode 100644
index 567bbc289..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/formatter.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsm
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- templateReplaceCaptureRE = regexp.MustCompile(`\$\{?([a-zA-Z0-9_\$]+)\}?`)
-)
-
-type TemplateFormatter struct {
- captureIndexes []int
- captureCount int
- fmtString string
-}
-
-// NewTemplateFormatter instantiates a TemplateFormatter
-// from given template string and the maximum amount of captures.
-func NewTemplateFormatter(template string, captureCount int) *TemplateFormatter {
- matches := templateReplaceCaptureRE.FindAllStringSubmatch(template, -1)
- if len(matches) == 0 {
- // if no regex reference found, keep it as it is
- return &TemplateFormatter{captureCount: 0, fmtString: template}
- }
-
- var indexes []int
- valueFormatter := template
- for _, match := range matches {
- idx, err := strconv.Atoi(match[len(match)-1])
- if err != nil || idx > captureCount || idx < 1 {
- // if the index is larger than the capture count, or an unsupported
- // named capture group is used, replace the reference with an empty string
- valueFormatter = strings.Replace(valueFormatter, match[0], "", -1)
- } else {
- valueFormatter = strings.Replace(valueFormatter, match[0], "%s", -1)
- // note: the regex reference variable $? starts from 1
- indexes = append(indexes, idx-1)
- }
- }
- return &TemplateFormatter{
- captureIndexes: indexes,
- captureCount: len(indexes),
- fmtString: valueFormatter,
- }
-}
-
-// Format accepts a list containing captured strings and returns the formatted
-// string using the template stored in current TemplateFormatter.
-func (formatter *TemplateFormatter) Format(captures []string) string {
- if formatter.captureCount == 0 {
- // no label substitution, keep as it is
- return formatter.fmtString
- }
- indexes := formatter.captureIndexes
- vargs := make([]interface{}, formatter.captureCount)
- for i, idx := range indexes {
- vargs[i] = captures[idx]
- }
- return fmt.Sprintf(formatter.fmtString, vargs...)
-}
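
A usage sketch for the formatter: `$1`/`${1}` reference the first `*` capture, and unresolvable references are replaced with the empty string, per the code above. The template here is illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/statsd_exporter/pkg/mapper/fsm"
)

func main() {
	// Two captures are available; ${1} and ${2} select them in order.
	tf := fsm.NewTemplateFormatter("${1}_to_${2}", 2)
	fmt.Println(tf.Format([]string{"eu", "us"})) // eu_to_us
}
```
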
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/fsm.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/fsm.go
deleted file mode 100644
index 85068f7f6..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/fsm.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsm
-
-import (
- "regexp"
- "strings"
-
- "github.com/go-kit/log"
-
- "github.com/prometheus/statsd_exporter/pkg/level"
-)
-
-type mappingState struct {
- transitions map[string]*mappingState
- minRemainingLength int
- maxRemainingLength int
- // the Result* members are unset unless a metric ends at this state
- Result interface{}
- ResultPriority int
-}
-
-type fsmBacktrackStackCursor struct {
- fieldIndex int
- captureIndex int
- currentCapture string
- state *mappingState
- prev *fsmBacktrackStackCursor
- next *fsmBacktrackStackCursor
-}
-
-type FSM struct {
- root *mappingState
- metricTypes []string
- statesCount int
- BacktrackingNeeded bool
- OrderingDisabled bool
-}
-
-// NewFSM creates a new FSM instance
-func NewFSM(metricTypes []string, maxPossibleTransitions int, orderingDisabled bool) *FSM {
- fsm := FSM{}
- root := &mappingState{}
- root.transitions = make(map[string]*mappingState, len(metricTypes))
-
- for _, field := range metricTypes {
- state := &mappingState{}
- (*state).transitions = make(map[string]*mappingState, maxPossibleTransitions)
- root.transitions[string(field)] = state
- }
- fsm.OrderingDisabled = orderingDisabled
- fsm.metricTypes = metricTypes
- fsm.statesCount = 0
- fsm.root = root
- return &fsm
-}
-
-// AddState adds a mapping rule into the existing FSM.
-// The maxPossibleTransitions parameter sets the expected count of transitions left.
-// The result parameter sets the value to be returned when the FSM finds a match in GetMapping.
-func (f *FSM) AddState(match string, matchMetricType string, maxPossibleTransitions int, result interface{}) int {
- // first split by "."
- matchFields := strings.Split(match, ".")
- // fill into our FSM
- roots := []*mappingState{}
- // first state is the metric type
- if matchMetricType == "" {
- // if metricType is not specified, connect the start state to all three types
- for _, metricType := range f.metricTypes {
- roots = append(roots, f.root.transitions[string(metricType)])
- }
- } else {
- roots = append(roots, f.root.transitions[matchMetricType])
- }
- var captureCount int
- var finalStates []*mappingState
- // iterating over different start state (different metric types)
- for _, root := range roots {
- captureCount = 0
- // for each start state, connect from start state to end state
- for i, field := range matchFields {
- state, prs := root.transitions[field]
- if !prs {
- // create the state if it does not yet exist in the FSM
- state = &mappingState{}
- (*state).transitions = make(map[string]*mappingState, maxPossibleTransitions)
- (*state).maxRemainingLength = len(matchFields) - i - 1
- (*state).minRemainingLength = len(matchFields) - i - 1
- root.transitions[field] = state
- // if this is last field, set result to currentMapping instance
- if i == len(matchFields)-1 {
- root.transitions[field].Result = result
- }
- } else {
- (*state).maxRemainingLength = max(len(matchFields)-i-1, (*state).maxRemainingLength)
- (*state).minRemainingLength = min(len(matchFields)-i-1, (*state).minRemainingLength)
- }
- if field == "*" {
- captureCount++
- }
-
- // goto next state
- root = state
- }
- finalStates = append(finalStates, root)
- }
-
- for _, state := range finalStates {
- state.ResultPriority = f.statesCount
- }
-
- f.statesCount++
-
- return captureCount
-}
-
-// GetMapping using the fsm to find matching rules according to given statsdMetric and statsdMetricType.
-// If it finds a match, the final state and the captured strings are returned;
-// if no match is found, nil and an empty list are returned.
-func (f *FSM) GetMapping(statsdMetric string, statsdMetricType string) (*mappingState, []string) {
- matchFields := strings.Split(statsdMetric, ".")
- currentState := f.root.transitions[statsdMetricType]
-
- // the cursor/pointer into the backtrack stack, implemented as a doubly linked list
- var backtrackCursor *fsmBacktrackStackCursor
- resumeFromBacktrack := false
-
- // the return variable
- var finalState *mappingState
-
- captures := make([]string, len(matchFields))
- finalCaptures := make([]string, len(matchFields))
- // keep track of the capture index so we don't need to append() to captures
- captureIdx := 0
- fieldsCount := len(matchFields)
- i := 0
- var state *mappingState
- for { // the loop for backtracking
- for { // the loop for a single "depth only" search
- var present bool
- // if we resume from a backtrack, skip this branch,
- // since the state was already saved at the end of this branch
- if !resumeFromBacktrack {
- if len(currentState.transitions) > 0 {
- field := matchFields[i]
- state, present = currentState.transitions[field]
- fieldsLeft := fieldsCount - i - 1
- // also compare lengths upfront to avoid unnecessary looping or backtracking
- if !present || fieldsLeft > state.maxRemainingLength || fieldsLeft < state.minRemainingLength {
- state, present = currentState.transitions["*"]
- if !present || fieldsLeft > state.maxRemainingLength || fieldsLeft < state.minRemainingLength {
- break
- } else {
- captures[captureIdx] = field
- captureIdx++
- }
- } else if f.BacktrackingNeeded {
- // if backtracking is needed, also check for the alternative transition, i.e. *
- altState, present := currentState.transitions["*"]
- if present && fieldsLeft <= altState.maxRemainingLength && fieldsLeft >= altState.minRemainingLength {
- // push to backtracking stack
- newCursor := fsmBacktrackStackCursor{prev: backtrackCursor, state: altState,
- fieldIndex: i,
- captureIndex: captureIdx, currentCapture: field,
- }
- // if this is not the first time, connect to the previous cursor
- if backtrackCursor != nil {
- backtrackCursor.next = &newCursor
- }
- backtrackCursor = &newCursor
- }
- }
- } else {
- // no more transitions for this state
- break
- }
- } // backtrack will resume from here
-
- // have we reached a final state?
- if state.Result != nil && i == fieldsCount-1 {
- if f.OrderingDisabled {
- finalState = state
- return finalState, captures
- } else if finalState == nil || finalState.ResultPriority > state.ResultPriority {
- // if we care about ordering, try to find the result with the highest priority
- finalState = state
- // do a deep copy to preserve current captures
- copy(finalCaptures, captures)
- }
- break
- }
-
- i++
- if i >= fieldsCount {
- break
- }
-
- resumeFromBacktrack = false
- currentState = state
- }
- if backtrackCursor == nil {
- // if we are not backtracking or all paths have been traversed
- break
- } else {
- // pop one from stack
- state = backtrackCursor.state
- currentState = state
- i = backtrackCursor.fieldIndex
- captureIdx = backtrackCursor.captureIndex + 1
- // put the * capture back
- captures[captureIdx-1] = backtrackCursor.currentCapture
- backtrackCursor = backtrackCursor.prev
- if backtrackCursor != nil {
- // deref for GC
- backtrackCursor.next = nil
- }
- resumeFromBacktrack = true
- }
- }
- return finalState, finalCaptures
-}
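To make the two entry points above concrete, here is a hedged, in-package sketch of building and querying the FSM; the rule, metric name, and result payload are made up for illustration:

```go
// Sketch only, assuming the FSM API defined above (package fsm).
f := NewFSM([]string{"counter", "gauge", "observer"}, 16, false)

// One wildcard capture; AddState returns the capture count (1 here).
// The result value is an arbitrary payload returned on a match.
captureCount := f.AddState("client.*.request.count", "counter", 16, "hypothetical-result")
_ = captureCount

state, captures := f.GetMapping("client.foo.request.count", "counter")
if state != nil && state.Result != nil {
	// state.Result == "hypothetical-result", captures[0] == "foo"
	_ = captures
}
```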
-
-// TestIfNeedBacktracking tests whether backtracking is needed for the given list
-// of mappings, taking into account whether ordering is disabled.
-func TestIfNeedBacktracking(mappings []string, orderingDisabled bool, logger log.Logger) bool {
- backtrackingNeeded := false
- // if a rule A contains *, but there are other transitions at the same state,
- // rule A causes backtracking
- ruleByLength := make(map[int][]string)
- ruleREByLength := make(map[int][]*regexp.Regexp)
-
- // first group rules by length
- for _, mapping := range mappings {
- l := len(strings.Split(mapping, "."))
- ruleByLength[l] = append(ruleByLength[l], mapping)
-
- metricRe := strings.Replace(mapping, ".", "\\.", -1)
- metricRe = strings.Replace(metricRe, "*", "([^.]*)", -1)
- regex, err := regexp.Compile("^" + metricRe + "$")
- if err != nil {
- level.Warn(logger).Log("msg", "Invalid match, cannot compile regex in mapping", "mapping", mapping, "err", err)
- }
- // append regardless of errors; we will skip later if the regex is nil
- ruleREByLength[l] = append(ruleREByLength[l], regex)
- }
-
- for l, rules := range ruleByLength {
- if len(rules) == 1 {
- continue
- }
- rulesRE := ruleREByLength[l]
- for i1, r1 := range rules {
- currentRuleNeedBacktrack := false
- re1 := rulesRE[i1]
- if re1 == nil || !strings.Contains(r1, "*") {
- continue
- }
- // if rule r1 is A.B.C.*.E.*, is there a rule r2 such as A.B.C.D.x.x or A.B.C.*.E.F? (x is any string or *)
- // if such an r2 exists, then matching r1 requires backtracking
- for index := 0; index < len(r1); index++ {
- if r1[index] != '*' {
- continue
- }
- // translate the substring of r1 from 0 to the index of the current * into a regex
- // A.B.C.*.E.* becomes ^A\.B\.C\. and ^A\.B\.C\.\*\.E\.
- reStr := strings.Replace(r1[:index], ".", "\\.", -1)
- reStr = strings.Replace(reStr, "*", "\\*", -1)
- re := regexp.MustCompile("^" + reStr)
- for i2, r2 := range rules {
- if i2 == i1 {
- continue
- }
- if len(re.FindStringSubmatchIndex(r2)) > 0 {
- currentRuleNeedBacktrack = true
- break
- }
- }
- }
-
- for i2, r2 := range rules {
- if i2 != i1 && len(re1.FindStringSubmatchIndex(r2)) > 0 {
- // warn if we care about ordering and the superset rule occurs first
- if !orderingDisabled && i1 < i2 {
- level.Warn(logger).Log("msg", "first match is a superset of second match and has a higher priority, so the second will never be matched", "first_match", r1, "second_match", r2)
- }
- currentRuleNeedBacktrack = false
- }
- }
- for i2, re2 := range rulesRE {
- if i2 == i1 || re2 == nil {
- continue
- }
- // if r1 is a subset of another rule, we don't need to backtrack,
- // because either ordering is enabled,
- // or ordering is disabled and r1 can't be matched even with backtracking
- if len(re2.FindStringSubmatchIndex(r1)) > 0 {
- currentRuleNeedBacktrack = false
- }
- }
-
- if currentRuleNeedBacktrack {
- level.Warn(logger).Log("msg", "backtracking required because of match. Performance may be degraded", "match", r1)
- backtrackingNeeded = true
- }
- }
- }
-
- // backtracking will always be needed if ordering of rules is not disabled,
- // since transitions are stored in an (unordered) map
- // note: don't move this check to the beginning of this function,
- // since we still need the warnings about superset rules
-
- return !orderingDisabled || backtrackingNeeded
-}
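A hedged sketch of the rule interplay this function detects; the rules are hypothetical, and go-kit's NopLogger stands in for a real logger:

```go
rules := []string{
	"client.*.request.count", // wildcard in the second field
	"client.foo.*.count",     // a concrete "foo" competes with "*" at the same state
}
// With ordering enabled (orderingDisabled == false) the function always
// returns true; with ordering disabled it returns true here because a
// metric like "client.foo.request.count" first follows the exact "foo"
// branch and must backtrack to the "*" branch to match the first rule.
needed := TestIfNeedBacktracking(rules, true, log.NewNopLogger())
_ = needed // true
```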
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/fsm.png b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/fsm.png
deleted file mode 100644
index ef87d92a4..000000000
Binary files a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/fsm.png and /dev/null differ
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/minmax.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/minmax.go
deleted file mode 100644
index 95bd9c55b..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/minmax.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsm
-
-// min and max implementations for integers
-
-func min(x, y int) int {
- if x < y {
- return x
- }
- return y
-}
-
-func max(x, y int) int {
- if x > y {
- return x
- }
- return y
-}
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go
deleted file mode 100644
index 879f18c59..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import (
- "fmt"
- "os"
- "regexp"
- "sync"
- "time"
-
- "github.com/go-kit/log"
- "github.com/prometheus/client_golang/prometheus"
- "gopkg.in/yaml.v2"
-
- "github.com/prometheus/statsd_exporter/pkg/level"
- "github.com/prometheus/statsd_exporter/pkg/mapper/fsm"
-)
-
-var (
- // The first segment of a match cannot start with a number
- statsdMetricRE = `[a-zA-Z_]([a-zA-Z0-9_\-])*`
- // The subsequent segments of a match can start with a number
- // See https://github.com/prometheus/statsd_exporter/issues/328
- statsdMetricSubsequentRE = `[a-zA-Z0-9_]([a-zA-Z0-9_\-])*`
- templateReplaceRE = `(\$\{?\d+\}?)`
-
- metricLineRE = regexp.MustCompile(`^(\*|` + statsdMetricRE + `)(\.\*|\.` + statsdMetricSubsequentRE + `)*$`)
- metricNameRE = regexp.MustCompile(`^([a-zA-Z_]|` + templateReplaceRE + `)([a-zA-Z0-9_]|` + templateReplaceRE + `)*$`)
- labelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]+$`)
-)
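As a quick, hedged illustration of what these validation patterns accept (the metric lines and label are made up, exercised in-package):

```go
// Sketch: exercising the package-level patterns above.
ok := metricLineRE.MatchString("client.*.request.count") // true
ok = metricLineRE.MatchString("client.9xx.count")        // true: later segments may start with a digit
ok = metricLineRE.MatchString("9client.count")           // false: the first segment may not
ok = labelNameRE.MatchString("9bad_label")               // false: label names must start with a letter or _
_ = ok
```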
-
-type MetricMapper struct {
- Registerer prometheus.Registerer
- Defaults mapperConfigDefaults `yaml:"defaults"`
- Mappings []MetricMapping `yaml:"mappings"`
- FSM *fsm.FSM
- doFSM bool
- doRegex bool
- cache MetricMapperCache
- mutex sync.RWMutex
-
- MappingsCount prometheus.Gauge
-
- Logger log.Logger
-}
-
-type SummaryOptions struct {
- Quantiles []metricObjective `yaml:"quantiles"`
- MaxAge time.Duration `yaml:"max_age"`
- AgeBuckets uint32 `yaml:"age_buckets"`
- BufCap uint32 `yaml:"buf_cap"`
-}
-
-type HistogramOptions struct {
- Buckets []float64 `yaml:"buckets"`
-}
-
-type metricObjective struct {
- Quantile float64 `yaml:"quantile"`
- Error float64 `yaml:"error"`
-}
-
-var defaultQuantiles = []metricObjective{
- {Quantile: 0.5, Error: 0.05},
- {Quantile: 0.9, Error: 0.01},
- {Quantile: 0.99, Error: 0.001},
-}
-
-func (m *MetricMapper) InitFromYAMLString(fileContents string) error {
- var n MetricMapper
-
- if err := yaml.Unmarshal([]byte(fileContents), &n); err != nil {
- return err
- }
-
- if len(n.Defaults.HistogramOptions.Buckets) == 0 {
- n.Defaults.HistogramOptions.Buckets = prometheus.DefBuckets
- }
-
- if len(n.Defaults.SummaryOptions.Quantiles) == 0 {
- n.Defaults.SummaryOptions.Quantiles = defaultQuantiles
- }
-
- if n.Defaults.MatchType == MatchTypeDefault {
- n.Defaults.MatchType = MatchTypeGlob
- }
-
- remainingMappingsCount := len(n.Mappings)
-
- n.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeObserver)},
- remainingMappingsCount, n.Defaults.GlobDisableOrdering)
-
- for i := range n.Mappings {
- remainingMappingsCount--
-
- currentMapping := &n.Mappings[i]
-
- // check that the label names are valid
- for k := range currentMapping.Labels {
- if !labelNameRE.MatchString(k) {
- return fmt.Errorf("invalid label key: %s", k)
- }
- }
-
- if currentMapping.Name == "" {
- return fmt.Errorf("line %d: metric mapping didn't set a metric name", i)
- }
-
- if !metricNameRE.MatchString(currentMapping.Name) {
- return fmt.Errorf("metric name '%s' doesn't match regex '%s'", currentMapping.Name, metricNameRE)
- }
-
- if currentMapping.MatchType == "" {
- currentMapping.MatchType = n.Defaults.MatchType
- }
-
- if currentMapping.Action == "" {
- currentMapping.Action = ActionTypeMap
- }
-
- if currentMapping.MatchType == MatchTypeGlob {
- n.doFSM = true
- if !metricLineRE.MatchString(currentMapping.Match) {
- return fmt.Errorf("invalid match: %s", currentMapping.Match)
- }
-
- captureCount := n.FSM.AddState(currentMapping.Match, string(currentMapping.MatchMetricType),
- remainingMappingsCount, currentMapping)
-
- currentMapping.nameFormatter = fsm.NewTemplateFormatter(currentMapping.Name, captureCount)
-
- labelKeys := make([]string, len(currentMapping.Labels))
- labelFormatters := make([]*fsm.TemplateFormatter, len(currentMapping.Labels))
- labelIndex := 0
- for label, valueExpr := range currentMapping.Labels {
- labelKeys[labelIndex] = label
- labelFormatters[labelIndex] = fsm.NewTemplateFormatter(valueExpr, captureCount)
- labelIndex++
- }
- currentMapping.labelFormatters = labelFormatters
- currentMapping.labelKeys = labelKeys
- } else {
- if regex, err := regexp.Compile(currentMapping.Match); err != nil {
- return fmt.Errorf("invalid regex %s in mapping: %v", currentMapping.Match, err)
- } else {
- currentMapping.regex = regex
- }
- n.doRegex = true
- }
-
- if currentMapping.ObserverType == "" {
- currentMapping.ObserverType = n.Defaults.ObserverType
- }
-
- if currentMapping.LegacyQuantiles != nil &&
- (currentMapping.SummaryOptions == nil || currentMapping.SummaryOptions.Quantiles != nil) {
- level.Warn(m.Logger).Log("msg", "using the top level quantiles is deprecated. Please use quantiles in the summary_options hierarchy")
- }
-
- if currentMapping.LegacyBuckets != nil &&
- (currentMapping.HistogramOptions == nil || currentMapping.HistogramOptions.Buckets != nil) {
- level.Warn(m.Logger).Log("msg", "using the top level buckets is deprecated. Please use buckets in the histogram_options hierarchy")
- }
-
- if currentMapping.SummaryOptions != nil &&
- currentMapping.LegacyQuantiles != nil &&
- currentMapping.SummaryOptions.Quantiles != nil {
- return fmt.Errorf("cannot use quantiles in both the top level and summary options at the same time in %s", currentMapping.Match)
- }
-
- if currentMapping.HistogramOptions != nil &&
- currentMapping.LegacyBuckets != nil &&
- currentMapping.HistogramOptions.Buckets != nil {
- return fmt.Errorf("cannot use buckets in both the top level and histogram options at the same time in %s", currentMapping.Match)
- }
-
- if currentMapping.ObserverType == ObserverTypeHistogram {
- if currentMapping.SummaryOptions != nil {
- return fmt.Errorf("cannot use histogram observer and summary options at the same time")
- }
- if currentMapping.HistogramOptions == nil {
- currentMapping.HistogramOptions = &HistogramOptions{}
- }
- if currentMapping.LegacyBuckets != nil && len(currentMapping.LegacyBuckets) != 0 {
- currentMapping.HistogramOptions.Buckets = currentMapping.LegacyBuckets
- }
- if currentMapping.HistogramOptions.Buckets == nil || len(currentMapping.HistogramOptions.Buckets) == 0 {
- currentMapping.HistogramOptions.Buckets = n.Defaults.HistogramOptions.Buckets
- }
- }
-
- if currentMapping.ObserverType == ObserverTypeSummary {
- if currentMapping.HistogramOptions != nil {
- return fmt.Errorf("cannot use summary observer and histogram options at the same time")
- }
- if currentMapping.SummaryOptions == nil {
- currentMapping.SummaryOptions = &SummaryOptions{}
- }
- if currentMapping.LegacyQuantiles != nil && len(currentMapping.LegacyQuantiles) != 0 {
- currentMapping.SummaryOptions.Quantiles = currentMapping.LegacyQuantiles
- }
- if currentMapping.SummaryOptions.Quantiles == nil || len(currentMapping.SummaryOptions.Quantiles) == 0 {
- currentMapping.SummaryOptions.Quantiles = n.Defaults.SummaryOptions.Quantiles
- }
- if currentMapping.SummaryOptions.MaxAge == 0 {
- currentMapping.SummaryOptions.MaxAge = n.Defaults.SummaryOptions.MaxAge
- }
- if currentMapping.SummaryOptions.AgeBuckets == 0 {
- currentMapping.SummaryOptions.AgeBuckets = n.Defaults.SummaryOptions.AgeBuckets
- }
- if currentMapping.SummaryOptions.BufCap == 0 {
- currentMapping.SummaryOptions.BufCap = n.Defaults.SummaryOptions.BufCap
- }
- }
-
- if currentMapping.Ttl == 0 && n.Defaults.Ttl > 0 {
- currentMapping.Ttl = n.Defaults.Ttl
- }
- }
-
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if m.Logger == nil {
- m.Logger = log.NewNopLogger()
- }
-
- m.Defaults = n.Defaults
- m.Mappings = n.Mappings
-
- // Reset the cache since this function can be used to reload config
- if m.cache != nil {
- m.cache.Reset()
- }
-
- if n.doFSM {
- var mappings []string
- for _, mapping := range n.Mappings {
- if mapping.MatchType == MatchTypeGlob {
- mappings = append(mappings, mapping.Match)
- }
- }
- n.FSM.BacktrackingNeeded = fsm.TestIfNeedBacktracking(mappings, n.FSM.OrderingDisabled, m.Logger)
-
- m.FSM = n.FSM
- m.doRegex = n.doRegex
- }
- m.doFSM = n.doFSM
-
- if m.MappingsCount != nil {
- m.MappingsCount.Set(float64(len(n.Mappings)))
- }
-
- return nil
-}
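For example, loading a single glob mapping might look like this (a hedged sketch; the mapping and names are hypothetical):

```go
m := &MetricMapper{}
err := m.InitFromYAMLString(`
mappings:
- match: "client.*.request.count"
  name: "request_count"
  match_metric_type: counter
  labels:
    client: "$1"
`)
if err != nil {
	// the YAML failed validation (bad match line, label key, or metric name)
}
```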
-
-func (m *MetricMapper) InitFromFile(fileName string) error {
- mappingStr, err := os.ReadFile(fileName)
- if err != nil {
- return err
- }
-
- return m.InitFromYAMLString(string(mappingStr))
-}
-
-// UseCache tells the mapper to use a cache that implements the MetricMapperCache interface.
-// This cache MUST be thread-safe!
-func (m *MetricMapper) UseCache(cache MetricMapperCache) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
- m.cache = cache
-}
-
-func (m *MetricMapper) GetMapping(statsdMetric string, statsdMetricType MetricType) (*MetricMapping, prometheus.Labels, bool) {
- m.mutex.RLock()
- defer m.mutex.RUnlock()
-
- // only use a cache if one is present
- if m.cache != nil {
- result, cached := m.cache.Get(formatKey(statsdMetric, statsdMetricType))
- if cached {
- r := result.(MetricMapperCacheResult)
- return r.Mapping, r.Labels, r.Matched
- }
- }
-
- // glob matching
- if m.doFSM {
- finalState, captures := m.FSM.GetMapping(statsdMetric, string(statsdMetricType))
- if finalState != nil && finalState.Result != nil {
- v := finalState.Result.(*MetricMapping)
- result := copyMetricMapping(v)
- result.Name = result.nameFormatter.Format(captures)
-
- labels := prometheus.Labels{}
- for index, formatter := range result.labelFormatters {
- labels[result.labelKeys[index]] = formatter.Format(captures)
- }
-
- r := MetricMapperCacheResult{
- Mapping: result,
- Matched: true,
- Labels: labels,
- }
- // add match to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), r)
- }
-
- return result, labels, true
- } else if !m.doRegex {
- // if there's no regex match type, return immediately
- // Add miss to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})
- }
- return nil, nil, false
- }
- }
-
- // regex matching
- for _, mapping := range m.Mappings {
- // if a rule doesn't have the regex match type, its regex field is unset
- if mapping.regex == nil {
- continue
- }
- matches := mapping.regex.FindStringSubmatchIndex(statsdMetric)
- if len(matches) == 0 {
- continue
- }
-
- mapping.Name = string(mapping.regex.ExpandString(
- []byte{},
- mapping.Name,
- statsdMetric,
- matches,
- ))
-
- if mt := mapping.MatchMetricType; mt != "" && mt != statsdMetricType {
- continue
- }
-
- labels := prometheus.Labels{}
- for label, valueExpr := range mapping.Labels {
- value := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches)
- labels[label] = string(value)
- }
-
- r := MetricMapperCacheResult{
- Mapping: &mapping,
- Matched: true,
- Labels: labels,
- }
- // Add Match to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), r)
- }
-
- return &mapping, labels, true
- }
-
- // Add Miss to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})
- }
- return nil, nil, false
-}
-
-// make a shallow copy so that we do not overwrite the name,
-// as multiple metric names can be matched by the same mapping
-func copyMetricMapping(in *MetricMapping) *MetricMapping {
- out := *in
- return &out
-}
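Continuing the earlier sketch, resolving an incoming statsd metric against the mapper might look like this (hypothetical metric name):

```go
mapping, labels, matched := m.GetMapping("client.foo.request.count", MetricTypeCounter)
if matched {
	// With the mapping sketched earlier: mapping.Name == "request_count"
	// and labels == prometheus.Labels{"client": "foo"}.
	_, _ = mapping, labels
}
```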
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go
deleted file mode 100644
index 9d65f8c2c..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import (
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type CacheMetrics struct {
- CacheLength prometheus.Gauge
- CacheGetsTotal prometheus.Counter
- CacheHitsTotal prometheus.Counter
-}
-
-func NewCacheMetrics(reg prometheus.Registerer) *CacheMetrics {
- var m CacheMetrics
-
- m.CacheLength = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Name: "statsd_metric_mapper_cache_length",
- Help: "The count of unique metrics currently cached.",
- },
- )
- m.CacheGetsTotal = prometheus.NewCounter(
- prometheus.CounterOpts{
- Name: "statsd_metric_mapper_cache_gets_total",
- Help: "The count of total metric cache gets.",
- },
- )
- m.CacheHitsTotal = prometheus.NewCounter(
- prometheus.CounterOpts{
- Name: "statsd_metric_mapper_cache_hits_total",
- Help: "The count of total metric cache hits.",
- },
- )
-
- if reg != nil {
- reg.MustRegister(m.CacheLength)
- reg.MustRegister(m.CacheGetsTotal)
- reg.MustRegister(m.CacheHitsTotal)
- }
- return &m
-}
-
-type MetricMapperCacheResult struct {
- Mapping *MetricMapping
- Matched bool
- Labels prometheus.Labels
-}
-
-// MetricMapperCache MUST be thread-safe and should be instrumented with CacheMetrics
-type MetricMapperCache interface {
- // Get a cached result
- Get(metricKey string) (interface{}, bool)
- // Add a statsd MetricMapperResult to the cache
- Add(metricKey string, result interface{}) // Add an item to the cache
- // Reset clears the cache for config reloads
- Reset()
-}
-
-func formatKey(metricString string, metricType MetricType) string {
- return string(metricType) + "." + metricString
-}
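The interface leaves sizing and eviction to the implementation. A minimal, unbounded, thread-safe sketch (not one of the exporter's shipped caches, without CacheMetrics instrumentation, and assuming a "sync" import) could look like:

```go
type simpleCache struct {
	mu    sync.RWMutex
	items map[string]interface{}
}

func newSimpleCache() *simpleCache {
	return &simpleCache{items: make(map[string]interface{})}
}

func (c *simpleCache) Get(metricKey string) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.items[metricKey]
	return v, ok
}

func (c *simpleCache) Add(metricKey string, result interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[metricKey] = result
}

func (c *simpleCache) Reset() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = make(map[string]interface{})
}
```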
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go
deleted file mode 100644
index 754a677b7..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "time"
-
-type mapperConfigDefaults struct {
- ObserverType ObserverType `yaml:"observer_type"`
- MatchType MatchType `yaml:"match_type"`
- GlobDisableOrdering bool `yaml:"glob_disable_ordering"`
- Ttl time.Duration `yaml:"ttl"`
- SummaryOptions SummaryOptions `yaml:"summary_options"`
- HistogramOptions HistogramOptions `yaml:"histogram_options"`
-}
-
-// mapperConfigDefaultsAlias is used to unmarshal the yaml config into mapperConfigDefaults and allows deprecated fields
-type mapperConfigDefaultsAlias struct {
- ObserverType ObserverType `yaml:"observer_type"`
- TimerType ObserverType `yaml:"timer_type,omitempty"` // DEPRECATED - field only present to preserve backwards compatibility in configs
- Buckets []float64 `yaml:"buckets"` // DEPRECATED - field only present to preserve backwards compatibility in configs
- Quantiles []metricObjective `yaml:"quantiles"` // DEPRECATED - field only present to preserve backwards compatibility in configs
- MatchType MatchType `yaml:"match_type"`
- GlobDisableOrdering bool `yaml:"glob_disable_ordering"`
- Ttl time.Duration `yaml:"ttl"`
- SummaryOptions SummaryOptions `yaml:"summary_options"`
- HistogramOptions HistogramOptions `yaml:"histogram_options"`
-}
-
-// UnmarshalYAML is a custom unmarshal function that allows use of the deprecated config keys;
-// observer_type overrides timer_type.
-func (d *mapperConfigDefaults) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var tmp mapperConfigDefaultsAlias
- if err := unmarshal(&tmp); err != nil {
- return err
- }
-
- // Copy defaults
- d.ObserverType = tmp.ObserverType
- d.MatchType = tmp.MatchType
- d.GlobDisableOrdering = tmp.GlobDisableOrdering
- d.Ttl = tmp.Ttl
- d.SummaryOptions = tmp.SummaryOptions
- d.HistogramOptions = tmp.HistogramOptions
-
- // Use deprecated TimerType if necessary
- if tmp.ObserverType == "" {
- d.ObserverType = tmp.TimerType
- }
-
- // Use deprecated quantiles if necessary
- if len(tmp.SummaryOptions.Quantiles) == 0 && len(tmp.Quantiles) > 0 {
- d.SummaryOptions = SummaryOptions{Quantiles: tmp.Quantiles}
- }
-
- // Use deprecated buckets if necessary
- if len(tmp.HistogramOptions.Buckets) == 0 && len(tmp.Buckets) > 0 {
- d.HistogramOptions = HistogramOptions{Buckets: tmp.Buckets}
- }
-
- return nil
-}
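A hedged sketch of the fallback behavior for a config that still uses only the deprecated keys (yaml.v2 assumed, as elsewhere in this module):

```go
var d mapperConfigDefaults
err := yaml.Unmarshal([]byte(`
timer_type: histogram
buckets: [0.1, 0.5, 1]
`), &d)
if err == nil {
	// d.ObserverType == ObserverTypeHistogram (taken from the deprecated timer_type)
	// d.HistogramOptions.Buckets == []float64{0.1, 0.5, 1} (promoted from deprecated buckets)
}
```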
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go
deleted file mode 100644
index cdba27afb..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import (
- "regexp"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-
- "github.com/prometheus/statsd_exporter/pkg/mapper/fsm"
-)
-
-type MetricMapping struct {
- Match string `yaml:"match"`
- Name string `yaml:"name"`
- nameFormatter *fsm.TemplateFormatter
- regex *regexp.Regexp
- Labels prometheus.Labels `yaml:"labels"`
- labelKeys []string
- labelFormatters []*fsm.TemplateFormatter
- ObserverType ObserverType `yaml:"observer_type"`
- TimerType ObserverType `yaml:"timer_type,omitempty"` // DEPRECATED - field only present to preserve backwards compatibility in configs. Always empty
- LegacyBuckets []float64 `yaml:"buckets"`
- LegacyQuantiles []metricObjective `yaml:"quantiles"`
- MatchType MatchType `yaml:"match_type"`
- HelpText string `yaml:"help"`
- Action ActionType `yaml:"action"`
- MatchMetricType MetricType `yaml:"match_metric_type"`
- Ttl time.Duration `yaml:"ttl"`
- SummaryOptions *SummaryOptions `yaml:"summary_options"`
- HistogramOptions *HistogramOptions `yaml:"histogram_options"`
-}
-
-// UnmarshalYAML is a custom unmarshal function that allows use of the deprecated config keys;
-// observer_type overrides timer_type.
-func (m *MetricMapping) UnmarshalYAML(unmarshal func(interface{}) error) error {
- type MetricMappingAlias MetricMapping
- var tmp MetricMappingAlias
- if err := unmarshal(&tmp); err != nil {
- return err
- }
-
- // Copy defaults
- m.Match = tmp.Match
- m.Name = tmp.Name
- m.Labels = tmp.Labels
- m.ObserverType = tmp.ObserverType
- m.LegacyBuckets = tmp.LegacyBuckets
- m.LegacyQuantiles = tmp.LegacyQuantiles
- m.MatchType = tmp.MatchType
- m.HelpText = tmp.HelpText
- m.Action = tmp.Action
- m.MatchMetricType = tmp.MatchMetricType
- m.Ttl = tmp.Ttl
- m.SummaryOptions = tmp.SummaryOptions
- m.HistogramOptions = tmp.HistogramOptions
-
- // Use deprecated TimerType if necessary
- if tmp.ObserverType == "" {
- m.ObserverType = tmp.TimerType
- }
-
- return nil
-}
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/match.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/match.go
deleted file mode 100644
index 12d5e8d6b..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/match.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "fmt"
-
-type MatchType string
-
-const (
- MatchTypeGlob MatchType = "glob"
- MatchTypeRegex MatchType = "regex"
- MatchTypeDefault MatchType = ""
-)
-
-func (t *MatchType) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var v string
- if err := unmarshal(&v); err != nil {
- return err
- }
-
- switch MatchType(v) {
- case MatchTypeRegex:
- *t = MatchTypeRegex
- case MatchTypeGlob, MatchTypeDefault:
- *t = MatchTypeGlob
- default:
- return fmt.Errorf("invalid match type %q", v)
- }
- return nil
-}
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go
deleted file mode 100644
index 920c16ed3..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "fmt"
-
-type MetricType string
-
-const (
- MetricTypeCounter MetricType = "counter"
- MetricTypeGauge MetricType = "gauge"
- MetricTypeObserver MetricType = "observer"
- MetricTypeTimer MetricType = "timer" // DEPRECATED
-)
-
-func (m *MetricType) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var v string
- if err := unmarshal(&v); err != nil {
- return err
- }
-
- switch MetricType(v) {
- case MetricTypeCounter:
- *m = MetricTypeCounter
- case MetricTypeGauge:
- *m = MetricTypeGauge
- case MetricTypeObserver:
- *m = MetricTypeObserver
- case MetricTypeTimer:
- *m = MetricTypeObserver
- default:
- return fmt.Errorf("invalid metric type '%s'", v)
- }
- return nil
-}
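A short, hedged sketch of the normalization this gives for the deprecated type (yaml.v2 assumed):

```go
var mt MetricType
if err := yaml.Unmarshal([]byte(`timer`), &mt); err == nil {
	// mt == MetricTypeObserver: the deprecated "timer" is accepted
	// but normalized to "observer"
	_ = mt
}
```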
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go
deleted file mode 100644
index 3d5da7eab..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "fmt"
-
-type ObserverType string
-
-const (
- ObserverTypeHistogram ObserverType = "histogram"
- ObserverTypeSummary ObserverType = "summary"
- ObserverTypeDefault ObserverType = ""
-)
-
-func (t *ObserverType) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var v string
- if err := unmarshal(&v); err != nil {
- return err
- }
-
- switch ObserverType(v) {
- case ObserverTypeHistogram:
- *t = ObserverTypeHistogram
- case ObserverTypeSummary, ObserverTypeDefault:
- *t = ObserverTypeSummary
- default:
- return fmt.Errorf("invalid observer type '%s'", v)
- }
- return nil
-}
diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore
deleted file mode 100644
index 74a6db472..000000000
--- a/vendor/go.opencensus.io/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-/.idea/
-
-# go.opencensus.io/exporter/aws
-/exporter/aws/
-
-# Exclude vendor, use dep ensure after checkout:
-/vendor/github.com/
-/vendor/golang.org/
-/vendor/google.golang.org/
diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS
deleted file mode 100644
index e491a9e7f..000000000
--- a/vendor/go.opencensus.io/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md
deleted file mode 100644
index 1ba3962c8..000000000
--- a/vendor/go.opencensus.io/CONTRIBUTING.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# How to contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult [GitHub Help] for more
-information on using pull requests.
-
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/
-
-## Instructions
-
-Fork the repo, checkout the upstream repo to your GOPATH by:
-
-```
-$ go get -d go.opencensus.io
-```
-
-Add your fork as an origin:
-
-```
-cd $(go env GOPATH)/src/go.opencensus.io
-git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
-```
-
-Run tests:
-
-```
-$ make install-tools # Only first time.
-$ make
-```
-
-Checkout a new branch, make modifications and push the branch to your fork:
-
-```
-$ git checkout -b feature
-# edit files
-$ git commit
-$ git push fork feature
-```
-
-Open a pull request against the main opencensus-go repo.
-
-## General Notes
-This project uses Appveyor and Travis for CI.
-
-The dependencies are managed with `go mod`. If you work with the sources under your
-`$GOPATH`, you need to set the environment variable `GO111MODULE=on`.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE
deleted file mode 100644
index 7a4a3ea24..000000000
--- a/vendor/go.opencensus.io/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile
deleted file mode 100644
index d896edc99..000000000
--- a/vendor/go.opencensus.io/Makefile
+++ /dev/null
@@ -1,97 +0,0 @@
-# TODO: Fix this on windows.
-ALL_SRC := $(shell find . -name '*.go' \
- -not -path './vendor/*' \
- -not -path '*/gen-go/*' \
- -type f | sort)
-ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC))))
-
-GOTEST_OPT?=-v -race -timeout 30s
-GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic
-GOTEST=go test
-GOIMPORTS=goimports
-GOLINT=golint
-GOVET=go vet
-EMBEDMD=embedmd
-# TODO decide if we need to change these names.
-TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages"
-TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages"
-README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ')
-
-.DEFAULT_GOAL := imports-lint-vet-embedmd-test
-
-.PHONY: imports-lint-vet-embedmd-test
-imports-lint-vet-embedmd-test: imports lint vet embedmd test
-
-# TODO enable test-with-coverage in travis
-.PHONY: travis-ci
-travis-ci: imports lint vet embedmd test test-386
-
-all-pkgs:
- @echo $(ALL_PKGS) | tr ' ' '\n' | sort
-
-all-srcs:
- @echo $(ALL_SRC) | tr ' ' '\n' | sort
-
-.PHONY: test
-test:
- $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS)
-
-.PHONY: test-386
-test-386:
- GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS)
-
-.PHONY: test-with-coverage
-test-with-coverage:
- $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
-
-.PHONY: imports
-imports:
- @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \
- if [ "$$IMPORTSOUT" ]; then \
- echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \
- echo "$$IMPORTSOUT\n"; \
- exit 1; \
- else \
- echo "Imports finished successfully"; \
- fi
-
-.PHONY: lint
-lint:
- @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \
- if [ "$$LINTOUT" ]; then \
- echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \
- echo "$$LINTOUT\n"; \
- exit 1; \
- else \
- echo "Lint finished successfully"; \
- fi
-
-.PHONY: vet
-vet:
- # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0"
- @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \
- if [ "$$VETOUT" ]; then \
- echo "$(GOVET) FAILED => go vet the following files:\n"; \
- echo "$$VETOUT\n"; \
- exit 1; \
- else \
- echo "Vet finished successfully"; \
- fi
-
-.PHONY: embedmd
-embedmd:
- @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \
- if [ "$$EMBEDMDOUT" ]; then \
- echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \
- echo "$$EMBEDMDOUT\n"; \
- exit 1; \
- else \
- echo "Embedmd finished successfully"; \
- fi
-
-.PHONY: install-tools
-install-tools:
- go install golang.org/x/lint/golint@latest
- go install golang.org/x/tools/cmd/cover@latest
- go install golang.org/x/tools/cmd/goimports@latest
- go install github.com/rakyll/embedmd@latest
diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md
deleted file mode 100644
index 1d7e83711..000000000
--- a/vendor/go.opencensus.io/README.md
+++ /dev/null
@@ -1,267 +0,0 @@
-# OpenCensus Libraries for Go
-
-[![Build Status][travis-image]][travis-url]
-[![Windows Build Status][appveyor-image]][appveyor-url]
-[![GoDoc][godoc-image]][godoc-url]
-[![Gitter chat][gitter-image]][gitter-url]
-
-OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
-collecting application performance and behavior monitoring data.
-Currently it consists of three major components: tags, stats and tracing.
-
-#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289).
-
-## Installation
-
-```
-$ go get -u go.opencensus.io
-```
-
-The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy).
-The use of vendoring or a dependency management tool is recommended.
-
-## Prerequisites
-
-OpenCensus Go libraries require Go 1.8 or later.
-
-## Getting Started
-
-The easiest way to get started using OpenCensus in your application is to use an existing
-integration with your RPC framework:
-
-* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
-* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
-* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql)
-* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
-* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
-* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
-* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
-* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
-* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
-* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
-
-If you're using a framework not listed here, you could either implement your own middleware for your
-framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
-
-## Exporters
-
-OpenCensus can export instrumentation data to various backends.
-OpenCensus has exporter implementations for the following; users
-can implement their own exporters by implementing the exporter interfaces
-([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
-[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
-
-* [Prometheus][exporter-prom] for stats
-* [OpenZipkin][exporter-zipkin] for traces
-* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
-* [Jaeger][exporter-jaeger] for traces
-* [AWS X-Ray][exporter-xray] for traces
-* [Datadog][exporter-datadog] for stats and traces
-* [Graphite][exporter-graphite] for stats
-* [Honeycomb][exporter-honeycomb] for traces
-* [New Relic][exporter-newrelic] for stats and traces
-
-## Overview
-
-
-
-In a microservices environment, a user request may go through
-multiple services until there is a response. OpenCensus allows
-you to instrument your services and collect diagnostics data all
-through your services end-to-end.
-
-## Tags
-
-Tags represent propagated key-value pairs. They are propagated using `context.Context`
-in the same process or can be encoded to be transmitted on the wire. Usually, this will
-be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
-for gRPC.
-
-Package `tag` allows adding or modifying tags in the current context.
-
-[embedmd]:# (internal/readme/tags.go new)
-```go
-ctx, err := tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Upsert(userIDKey, "cde36753ed"),
-)
-if err != nil {
- log.Fatal(err)
-}
-```
-
-## Stats
-
-OpenCensus is a low-overhead framework even when instrumentation is always enabled.
-To achieve this, it is optimized to make recording of data points fast
-and separate from the data aggregation.
-
-OpenCensus stats collection happens in two stages:
-
-* Definition of measures and recording of data points
-* Definition of views and aggregation of the recorded data
-
-### Recording
-
-Measurements are data points associated with a measure.
-Recording implicitly tags the set of Measurements with the tags from the
-provided context:
-
-[embedmd]:# (internal/readme/stats.go record)
-```go
-stats.Record(ctx, videoSize.M(102478))
-```
-
-### Views
-
-Views are how Measures are aggregated. You can think of them as queries over the
-set of recorded data points (measurements).
-
-Views have two parts: the tags to group by and the aggregation type used.
-
-Currently three types of aggregations are supported:
-* CountAggregation is used to count the number of times a sample was recorded.
-* DistributionAggregation is used to provide a histogram of the values of the samples.
-* SumAggregation is used to sum up all sample values.
-
-[embedmd]:# (internal/readme/stats.go aggs)
-```go
-distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
-countAgg := view.Count()
-sumAgg := view.Sum()
-```
-
-Here we create a view with the DistributionAggregation over our measure.
-
-[embedmd]:# (internal/readme/stats.go view)
-```go
-if err := view.Register(&view.View{
- Name: "example.com/video_size_distribution",
- Description: "distribution of processed video size over time",
- Measure: videoSize,
- Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
-}); err != nil {
- log.Fatalf("Failed to register view: %v", err)
-}
-```
-
-Register begins collecting data for the view. Registered views' data will be
-exported via the registered exporters.
-
-## Traces
-
-A distributed trace tracks the progression of a single user request as
-it is handled by the services and processes that make up an application.
-Each step is called a span in the trace. Spans include metadata about the step,
-including especially the time spent in the step, called the span’s latency.
-
-Below you see a trace and several spans underneath it.
-
-
-
-### Spans
-
-Span is the unit step in a trace. Each span has a name, latency, status and
-additional metadata.
-
-Below we are starting a span for a cache read and ending it
-when we are done:
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-### Propagation
-
-A span either has a parent or, if it has none, is a root span.
-The current span is propagated in-process and across the network to allow associating
-new child spans with the parent.
-
-In the same process, `context.Context` is used to propagate spans.
-`trace.StartSpan` creates a new span as a root if the current context
-doesn't contain a span. Or, it creates a child of the span that is
-already in current context. The returned context can be used to keep
-propagating the newly created span in the current context.
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-Across the network, OpenCensus provides different propagation
-methods for different protocols.
-
-* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
-* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
- by default but can be configured to use a custom propagation method by setting another
- [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
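For example, an HTTP server could be wired with an explicit propagation format like this (a sketch; `myHandler` is a stand-in for your application handler):

```go
package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

var myHandler http.Handler // stand-in: your application handler

func main() {
	och := &ochttp.Handler{
		Handler:     myHandler,
		Propagation: &b3.HTTPFormat{}, // explicit here; B3 is also the default
	}
	http.ListenAndServe(":8080", och)
}
```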
-
-## Execution Tracer
-
-With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
-See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
-for an example of their mutual use.
-
-## Profiles
-
-OpenCensus tags can be applied as profiler labels
-for users who are on Go 1.9 and above.
-
-[embedmd]:# (internal/readme/tags.go profiler)
-```go
-ctx, err = tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Insert(userIDKey, "fff0989878"),
-)
-if err != nil {
- log.Fatal(err)
-}
-tag.Do(ctx, func(ctx context.Context) {
- // Do work.
- // When profiling is on, samples will be
- // recorded with the key/values from the tag map.
-})
-```
-
-A screenshot of the CPU profile from the program above:
-
-
-
-## Deprecation Policy
-
-Before version 1.0.0, the following deprecation policy will be observed:
-
-No backwards-incompatible changes will be made except for the removal of symbols that have
-been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
-removing the *Deprecated* functionality will be made no sooner than 28 days after the first
-release in which the functionality was marked *Deprecated*.
-
-[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
-[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
-[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
-[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
-[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
-[godoc-url]: https://godoc.org/go.opencensus.io
-[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
-[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-
-
-[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
-[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
-
-[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
-[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
-[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin
-[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger
-[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
-[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
-[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
-[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
-[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go
diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml
deleted file mode 100644
index d08f0edaf..000000000
--- a/vendor/go.opencensus.io/appveyor.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: "{build}"
-
-platform: x64
-
-clone_folder: c:\gopath\src\go.opencensus.io
-
-environment:
- GOPATH: 'c:\gopath'
- GO111MODULE: 'on'
- CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
-
-stack: go 1.11
-
-before_test:
- - go version
- - go env
-
-build: false
-deploy: false
-
-test_script:
- - cd %APPVEYOR_BUILD_FOLDER%
- - go build -v .\...
- - go test -v .\... # No -race because cgo is disabled
diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go
deleted file mode 100644
index 81dc7183e..000000000
--- a/vendor/go.opencensus.io/internal/internal.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opencensus.io/internal"
-
-import (
- "fmt"
- "time"
-
- opencensus "go.opencensus.io"
-)
-
-// UserAgent is the user agent to be added to the outgoing
-// requests from the exporters.
-var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
-
-// MonotonicEndTime returns the current time as an end time,
-// derived from start using the monotonic clock.
-//
-// The monotonic clock is used in the subtraction, so adding the
-// duration since start back onto start yields an end time that
-// carries a monotonic reading.
-// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
-func MonotonicEndTime(start time.Time) time.Time {
- return start.Add(time.Since(start))
-}
diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go
deleted file mode 100644
index de8ccf236..000000000
--- a/vendor/go.opencensus.io/internal/sanitize.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "strings"
- "unicode"
-)
-
-const labelKeySizeLimit = 100
-
-// Sanitize returns a string that is truncated to 100 characters if it is too
-// long, and replaces non-alphanumeric characters with underscores.
-func Sanitize(s string) string {
- if len(s) == 0 {
- return s
- }
- if len(s) > labelKeySizeLimit {
- s = s[:labelKeySizeLimit]
- }
- s = strings.Map(sanitizeRune, s)
- if unicode.IsDigit(rune(s[0])) {
- s = "key_" + s
- }
- if s[0] == '_' {
- s = "key" + s
- }
- return s
-}
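-
-// For example, Sanitize("9foo-bar") returns "key_9foo_bar": the '-' becomes
-// an underscore and the leading digit triggers the "key_" prefix.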
-
-// converts anything that is not a letter or digit to an underscore
-func sanitizeRune(r rune) rune {
- if unicode.IsLetter(r) || unicode.IsDigit(r) {
- return r
- }
- // Everything else turns into an underscore
- return '_'
-}
diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
deleted file mode 100644
index 41b2c3fc0..000000000
--- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Package tagencoding contains the tag encoding
-// used internally by the stats collector.
-package tagencoding // import "go.opencensus.io/internal/tagencoding"
-
-// Values represents the encoded buffer for the values.
-type Values struct {
- Buffer []byte
- WriteIndex int
- ReadIndex int
-}
-
-func (vb *Values) growIfRequired(expected int) {
- if len(vb.Buffer)-vb.WriteIndex < expected {
- tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
- copy(tmp, vb.Buffer)
- vb.Buffer = tmp
- }
-}
-
-// WriteValue is the helper method to encode Values from map[Key][]byte.
-func (vb *Values) WriteValue(v []byte) {
- length := len(v) & 0xff
- vb.growIfRequired(1 + length)
-
- // writing length of v
- vb.Buffer[vb.WriteIndex] = byte(length)
- vb.WriteIndex++
-
- if length == 0 {
- // No value was encoded for this key
- return
- }
-
- // writing v
- copy(vb.Buffer[vb.WriteIndex:], v[:length])
- vb.WriteIndex += length
-}
-
-// ReadValue is the helper method to decode Values to a map[Key][]byte.
-func (vb *Values) ReadValue() []byte {
- // read length of v
- length := int(vb.Buffer[vb.ReadIndex])
- vb.ReadIndex++
- if length == 0 {
- // No value was encoded for this key
- return nil
- }
-
- // read value of v
- v := make([]byte, length)
- endIdx := vb.ReadIndex + length
- copy(v, vb.Buffer[vb.ReadIndex:endIdx])
- vb.ReadIndex = endIdx
- return v
-}
-
-// Bytes returns a reference to already written bytes in the Buffer.
-func (vb *Values) Bytes() []byte {
- return vb.Buffer[:vb.WriteIndex]
-}
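-
-// A round-trip sketch: values are decoded back in the order they were
-// written.
-//
-//	var vb Values
-//	vb.WriteValue([]byte("v1")) // writes the length byte 2, then "v1"
-//	vb.WriteValue(nil)          // writes the length byte 0 (missing value)
-//	_ = vb.ReadValue()          // []byte("v1")
-//	_ = vb.ReadValue()          // nil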
diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go
deleted file mode 100644
index 073af7b47..000000000
--- a/vendor/go.opencensus.io/internal/traceinternals.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "time"
-)
-
-// Trace allows internal access to some trace functionality.
-// TODO(#412): remove this
-var Trace interface{}
-
-// LocalSpanStoreEnabled true if the local span store is enabled.
-var LocalSpanStoreEnabled bool
-
-// BucketConfiguration stores the number of samples to store for span buckets
-// for successful and failed spans for a particular span name.
-type BucketConfiguration struct {
- Name string
- MaxRequestsSucceeded int
- MaxRequestsErrors int
-}
-
-// PerMethodSummary is a summary of the spans stored for a single span name.
-type PerMethodSummary struct {
- Active int
- LatencyBuckets []LatencyBucketSummary
- ErrorBuckets []ErrorBucketSummary
-}
-
-// LatencyBucketSummary is a summary of a latency bucket.
-type LatencyBucketSummary struct {
- MinLatency, MaxLatency time.Duration
- Size int
-}
-
-// ErrorBucketSummary is a summary of an error bucket.
-type ErrorBucketSummary struct {
- ErrorCode int32
- Size int
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go
deleted file mode 100644
index 52a7b3bf8..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metricdata contains the metrics data model.
-//
-// This is an EXPERIMENTAL package, and may change in arbitrary ways without
-// notice.
-package metricdata // import "go.opencensus.io/metric/metricdata"
diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go
deleted file mode 100644
index 12695ce2d..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/exemplar.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-import (
- "time"
-)
-
-// Exemplars keys.
-const (
- AttachmentKeySpanContext = "SpanContext"
-)
-
-// Exemplar is an example data point associated with each bucket of a
-// distribution type aggregation.
-//
-// Their purpose is to provide an example of the kind of thing
-// (request, RPC, trace span, etc.) that resulted in that measurement.
-type Exemplar struct {
- Value float64 // the value that was recorded
- Timestamp time.Time // the time the value was recorded
- Attachments Attachments // attachments (if any)
-}
-
-// Attachments is a map of extra values associated with a recorded data point.
-type Attachments map[string]interface{}
diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go
deleted file mode 100644
index aadae41e6..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/label.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-// LabelKey represents the key of a label. It has an optional
-// description attribute.
-type LabelKey struct {
- Key string
- Description string
-}
-
-// LabelValue represents the value of a label.
-// The zero value represents a missing label value, which may be treated
-// differently to an empty string value by some back ends.
-type LabelValue struct {
- Value string // string value of the label
-	Present bool // flag that indicates whether a value is present
-}
-
-// NewLabelValue creates a new non-nil LabelValue that represents the given string.
-func NewLabelValue(val string) LabelValue {
- return LabelValue{Value: val, Present: true}
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go
deleted file mode 100644
index 8293712c7..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/metric.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-import (
- "time"
-
- "go.opencensus.io/resource"
-)
-
-// Descriptor holds metadata about a metric.
-type Descriptor struct {
- Name string // full name of the metric
- Description string // human-readable description
- Unit Unit // units for the measure
- Type Type // type of measure
- LabelKeys []LabelKey // label keys
-}
-
-// Metric represents a quantity measured against a resource with different
-// label value combinations.
-type Metric struct {
- Descriptor Descriptor // metric descriptor
- Resource *resource.Resource // resource against which this was measured
- TimeSeries []*TimeSeries // one time series for each combination of label values
-}
-
-// TimeSeries is a sequence of points associated with a combination of label
-// values.
-type TimeSeries struct {
- LabelValues []LabelValue // label values, same order as keys in the metric descriptor
- Points []Point // points sequence
- StartTime time.Time // time we started recording this time series
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go
deleted file mode 100644
index 7fe057b19..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/point.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-import (
- "time"
-)
-
-// Point is a single data point of a time series.
-type Point struct {
- // Time is the point in time that this point represents in a time series.
- Time time.Time
- // Value is the value of this point. Prefer using ReadValue to switching on
- // the value type, since new value types might be added.
- Value interface{}
-}
-
-//go:generate stringer -type ValueType
-
-// NewFloat64Point creates a new Point holding a float64 value.
-func NewFloat64Point(t time.Time, val float64) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// NewInt64Point creates a new Point holding an int64 value.
-func NewInt64Point(t time.Time, val int64) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// NewDistributionPoint creates a new Point holding a Distribution value.
-func NewDistributionPoint(t time.Time, val *Distribution) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// NewSummaryPoint creates a new Point holding a Summary value.
-func NewSummaryPoint(t time.Time, val *Summary) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// ValueVisitor allows reading the value of a point.
-type ValueVisitor interface {
- VisitFloat64Value(float64)
- VisitInt64Value(int64)
- VisitDistributionValue(*Distribution)
- VisitSummaryValue(*Summary)
-}
-
-// ReadValue accepts a ValueVisitor and calls the appropriate method with the
-// value of this point.
-// Consumers of Point should use this in preference to switching on the type
-// of the value directly, since new value types may be added.
-func (p Point) ReadValue(vv ValueVisitor) {
- switch v := p.Value.(type) {
- case int64:
- vv.VisitInt64Value(v)
- case float64:
- vv.VisitFloat64Value(v)
- case *Distribution:
- vv.VisitDistributionValue(v)
- case *Summary:
- vv.VisitSummaryValue(v)
- default:
- panic("unexpected value type")
- }
-}
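-
-// A minimal ValueVisitor sketch that collects only numeric point values and
-// ignores distributions and summaries:
-//
-//	type numericVisitor struct{ out []float64 }
-//
-//	func (n *numericVisitor) VisitFloat64Value(v float64)          { n.out = append(n.out, v) }
-//	func (n *numericVisitor) VisitInt64Value(v int64)              { n.out = append(n.out, float64(v)) }
-//	func (n *numericVisitor) VisitDistributionValue(*Distribution) {}
-//	func (n *numericVisitor) VisitSummaryValue(*Summary)           {}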
-
-// Distribution contains summary statistics for a population of values. It
-// optionally contains a histogram representing the distribution of those
-// values across a set of buckets.
-type Distribution struct {
-	// Count is the number of values in the population. Must be non-negative.
-	// This value must equal the sum of the counts in Buckets if a histogram
-	// is provided.
- Count int64
- // Sum is the sum of the values in the population. If count is zero then this field
- // must be zero.
- Sum float64
- // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the
- // population. For values x_i this is:
- //
- // Sum[i=1..n]((x_i - mean)^2)
- //
- // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
- // describes Welford's method for accumulating this sum in one pass.
- //
- // If count is zero then this field must be zero.
- SumOfSquaredDeviation float64
- // BucketOptions describes the bounds of the histogram buckets in this
- // distribution.
- //
- // A Distribution may optionally contain a histogram of the values in the
- // population.
- //
- // If nil, there is no associated histogram.
- BucketOptions *BucketOptions
-	// Buckets holds the histogram bucket counts. If the distribution does not
-	// have a histogram, omit this field. If there is a histogram, then the sum
-	// of the counts in Buckets must equal the value in the Count field of the
-	// distribution.
- Buckets []Bucket
-}
-
-// BucketOptions describes the bounds of the histogram buckets in this
-// distribution.
-type BucketOptions struct {
- // Bounds specifies a set of bucket upper bounds.
- // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket
- // index i are:
- //
- // [0, Bounds[i]) for i == 0
- // [Bounds[i-1], Bounds[i]) for 0 < i < N-1
- // [Bounds[i-1], +infinity) for i == N-1
- Bounds []float64
-}
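-
-// For example, Bounds = []float64{10, 20} defines three buckets:
-// [0, 10), [10, 20) and [20, +infinity).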
-
-// Bucket represents a single bucket (value range) in a distribution.
-type Bucket struct {
- // Count is the number of values in each bucket of the histogram, as described in
- // bucket_bounds.
- Count int64
- // Exemplar associated with this bucket (if any).
- Exemplar *Exemplar
-}
-
-// Summary is a representation of percentiles.
-type Summary struct {
- // Count is the cumulative count (if available).
- Count int64
- // Sum is the cumulative sum of values (if available).
- Sum float64
- // HasCountAndSum is true if Count and Sum are available.
- HasCountAndSum bool
- // Snapshot represents percentiles calculated over an arbitrary time window.
- // The values in this struct can be reset at arbitrary unknown times, with
- // the requirement that all of them are reset at the same time.
- Snapshot Snapshot
-}
-
-// Snapshot represents percentiles over an arbitrary time.
-// The values in this struct can be reset at arbitrary unknown times, with
-// the requirement that all of them are reset at the same time.
-type Snapshot struct {
- // Count is the number of values in the snapshot. Optional since some systems don't
- // expose this. Set to 0 if not available.
- Count int64
- // Sum is the sum of values in the snapshot. Optional since some systems don't
- // expose this. If count is 0 then this field must be zero.
- Sum float64
- // Percentiles is a map from percentile (range (0-100.0]) to the value of
- // the percentile.
- Percentiles map[float64]float64
-}
-
-//go:generate stringer -type Type
-
-// Type is the overall type of metric, including its value type and whether it
-// represents a cumulative total (since the start time) or a gauge value.
-type Type int
-
-// Metric types.
-const (
- TypeGaugeInt64 Type = iota
- TypeGaugeFloat64
- TypeGaugeDistribution
- TypeCumulativeInt64
- TypeCumulativeFloat64
- TypeCumulativeDistribution
- TypeSummary
-)
diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go
deleted file mode 100644
index c3f8ec27b..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/type_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type Type"; DO NOT EDIT.
-
-package metricdata
-
-import "strconv"
-
-const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary"
-
-var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128}
-
-func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
- return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go
deleted file mode 100644
index b483a1371..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/unit.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-// Unit is a string encoded according to the case-sensitive abbreviations from the
-// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
-type Unit string
-
-// Predefined units. To record against a unit not represented here, create your
-// own Unit type constant from a string.
-const (
- UnitDimensionless Unit = "1"
- UnitBytes Unit = "By"
- UnitMilliseconds Unit = "ms"
-)
diff --git a/vendor/go.opencensus.io/metric/metricexport/doc.go b/vendor/go.opencensus.io/metric/metricexport/doc.go
deleted file mode 100644
index df632a792..000000000
--- a/vendor/go.opencensus.io/metric/metricexport/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metricexport contains support for exporting metric data.
-//
-// This is an EXPERIMENTAL package, and may change in arbitrary ways without
-// notice.
-package metricexport // import "go.opencensus.io/metric/metricexport"
diff --git a/vendor/go.opencensus.io/metric/metricexport/export.go b/vendor/go.opencensus.io/metric/metricexport/export.go
deleted file mode 100644
index 23f4a864a..000000000
--- a/vendor/go.opencensus.io/metric/metricexport/export.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricexport
-
-import (
- "context"
-
- "go.opencensus.io/metric/metricdata"
-)
-
-// Exporter is an interface that exporters implement to export the metric data.
-type Exporter interface {
- ExportMetrics(ctx context.Context, data []*metricdata.Metric) error
-}
diff --git a/vendor/go.opencensus.io/metric/metricexport/reader.go b/vendor/go.opencensus.io/metric/metricexport/reader.go
deleted file mode 100644
index 8a09d0f00..000000000
--- a/vendor/go.opencensus.io/metric/metricexport/reader.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package metricexport
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/metric/metricproducer"
- "go.opencensus.io/trace"
-)
-
-var (
- defaultSampler = trace.ProbabilitySampler(0.0001)
-	errReportingIntervalTooLow = fmt.Errorf("reporting interval less than %v", minimumReportingDuration)
- errAlreadyStarted = fmt.Errorf("already started")
- errIntervalReaderNil = fmt.Errorf("interval reader is nil")
- errExporterNil = fmt.Errorf("exporter is nil")
- errReaderNil = fmt.Errorf("reader is nil")
-)
-
-const (
- defaultReportingDuration = 60 * time.Second
- minimumReportingDuration = 1 * time.Second
- defaultSpanName = "ExportMetrics"
-)
-
-// ReaderOptions contains options pertaining to the metrics reader.
-type ReaderOptions struct {
-	// SpanName is the name used for the span created to export metrics.
- SpanName string
-}
-
-// Reader reads metrics from all producers registered
-// with the producer manager and exports them using the
-// provided exporter.
-type Reader struct {
- sampler trace.Sampler
-
- spanName string
-}
-
-// IntervalReader periodically reads metrics from all producers registered
-// with the producer manager and exports them using the provided
-// exporter. Call IntervalReader.Stop() to stop the reader.
-type IntervalReader struct {
-	// ReportingInterval is the time duration between two consecutive
- // metrics reporting. defaultReportingDuration is used if it is not set.
- // It cannot be set lower than minimumReportingDuration.
- ReportingInterval time.Duration
-
- exporter Exporter
- timer *time.Ticker
- quit, done chan bool
- mu sync.RWMutex
- reader *Reader
-}
-
-// ReaderOption applies changes to ReaderOptions.
-type ReaderOption func(*ReaderOptions)
-
-// WithSpanName makes the new reader use the given span name when exporting metrics.
-func WithSpanName(spanName string) ReaderOption {
- return func(o *ReaderOptions) {
- o.SpanName = spanName
- }
-}
-
-// NewReader returns a reader configured with the specified options.
-func NewReader(o ...ReaderOption) *Reader {
- var opts ReaderOptions
- for _, op := range o {
- op(&opts)
- }
- reader := &Reader{defaultSampler, defaultSpanName}
- if opts.SpanName != "" {
- reader.spanName = opts.SpanName
- }
- return reader
-}
-
-// NewIntervalReader creates a reader. Once started, it periodically
-// reads metrics from all producers and exports them using the provided exporter.
-func NewIntervalReader(reader *Reader, exporter Exporter) (*IntervalReader, error) {
- if exporter == nil {
- return nil, errExporterNil
- }
- if reader == nil {
- return nil, errReaderNil
- }
-
- r := &IntervalReader{
- exporter: exporter,
- reader: reader,
- }
- return r, nil
-}
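-
-// A typical usage sketch (error handling elided; exporter is any Exporter):
-//
-//	ir, _ := NewIntervalReader(NewReader(), exporter)
-//	ir.ReportingInterval = 10 * time.Second
-//	_ = ir.Start()
-//	defer ir.Stop()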
-
-// Start starts the IntervalReader which periodically reads metrics from all
-// producers registered with the global producer manager. If the reporting
-// interval is not set prior to calling this function, the default reporting
-// interval is used.
-func (ir *IntervalReader) Start() error {
- if ir == nil {
- return errIntervalReaderNil
- }
- ir.mu.Lock()
- defer ir.mu.Unlock()
- var reportingInterval = defaultReportingDuration
- if ir.ReportingInterval != 0 {
- if ir.ReportingInterval < minimumReportingDuration {
- return errReportingIntervalTooLow
- }
- reportingInterval = ir.ReportingInterval
- }
-
- if ir.quit != nil {
- return errAlreadyStarted
- }
- ir.timer = time.NewTicker(reportingInterval)
- ir.quit = make(chan bool)
- ir.done = make(chan bool)
-
- go ir.startInternal()
- return nil
-}
-
-func (ir *IntervalReader) startInternal() {
- for {
- select {
- case <-ir.timer.C:
- ir.reader.ReadAndExport(ir.exporter)
- case <-ir.quit:
- ir.timer.Stop()
- ir.done <- true
- return
- }
- }
-}
-
-// Stop stops the reader from reading and exporting metrics.
-// Additional calls to Stop are no-ops.
-func (ir *IntervalReader) Stop() {
- if ir == nil {
- return
- }
- ir.mu.Lock()
- defer ir.mu.Unlock()
- if ir.quit == nil {
- return
- }
- ir.quit <- true
- <-ir.done
- close(ir.quit)
- close(ir.done)
- ir.quit = nil
-}
-
-// Flush flushes the metrics if the IntervalReader is stopped; otherwise it is a no-op.
-func (ir *IntervalReader) Flush() {
- ir.mu.Lock()
- defer ir.mu.Unlock()
-
- // No-op if IntervalReader is not stopped
- if ir.quit != nil {
- return
- }
-
- ir.reader.ReadAndExport(ir.exporter)
-}
-
-// ReadAndExport reads metrics from all producers registered with the
-// producer manager and then exports them using the provided exporter.
-func (r *Reader) ReadAndExport(exporter Exporter) {
- ctx, span := trace.StartSpan(context.Background(), r.spanName, trace.WithSampler(r.sampler))
- defer span.End()
- producers := metricproducer.GlobalManager().GetAll()
- data := []*metricdata.Metric{}
- for _, producer := range producers {
- data = append(data, producer.Read()...)
- }
- // TODO: [rghetia] add metrics for errors.
- exporter.ExportMetrics(ctx, data)
-}
diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go
deleted file mode 100644
index ca1f39049..000000000
--- a/vendor/go.opencensus.io/metric/metricproducer/manager.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricproducer
-
-import (
- "sync"
-)
-
-// Manager maintains a list of active producers. Producers can register
-// with the manager to allow readers to read all metrics provided by them.
-// Readers can retrieve all producers registered with the manager,
-// read metrics from the producers and export them.
-type Manager struct {
- mu sync.RWMutex
- producers map[Producer]struct{}
-}
-
-var prodMgr *Manager
-var once sync.Once
-
-// GlobalManager returns the single instance of the producer manager
-// that is used by all producers and all readers.
-func GlobalManager() *Manager {
- once.Do(func() {
- prodMgr = &Manager{}
- prodMgr.producers = make(map[Producer]struct{})
- })
- return prodMgr
-}
-
-// AddProducer adds the producer to the Manager if it is not already present.
-func (pm *Manager) AddProducer(producer Producer) {
- if producer == nil {
- return
- }
- pm.mu.Lock()
- defer pm.mu.Unlock()
- pm.producers[producer] = struct{}{}
-}
-
-// DeleteProducer deletes the producer from the Manager if it is present.
-func (pm *Manager) DeleteProducer(producer Producer) {
- if producer == nil {
- return
- }
- pm.mu.Lock()
- defer pm.mu.Unlock()
- delete(pm.producers, producer)
-}
-
-// GetAll returns a slice of all producers currently registered with
-// the Manager. Each call generates a new slice. The slice
-// should not be cached as registrations may change at any time. It is
-// typically called periodically by an exporter to read metrics from
-// the producers.
-func (pm *Manager) GetAll() []Producer {
- pm.mu.Lock()
- defer pm.mu.Unlock()
- producers := make([]Producer, len(pm.producers))
- i := 0
- for producer := range pm.producers {
- producers[i] = producer
- i++
- }
- return producers
-}
diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go
deleted file mode 100644
index 6cee9ed17..000000000
--- a/vendor/go.opencensus.io/metric/metricproducer/producer.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricproducer
-
-import (
- "go.opencensus.io/metric/metricdata"
-)
-
-// Producer is a source of metrics.
-type Producer interface {
- // Read should return the current values of all metrics supported by this
-	// producer.
- // The returned metrics should be unique for each combination of name and
- // resource.
- Read() []*metricdata.Metric
-}
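-
-// A producer implementation typically registers itself with the global
-// manager so that readers can discover it:
-//
-//	metricproducer.GlobalManager().AddProducer(p)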
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
deleted file mode 100644
index 11e31f421..000000000
--- a/vendor/go.opencensus.io/opencensus.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package opencensus contains Go support for OpenCensus.
-package opencensus // import "go.opencensus.io"
-
-// Version is the current release version of OpenCensus in use.
-func Version() string {
- return "0.24.0"
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go
deleted file mode 100644
index 2063b6f76..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
-
- "go.opencensus.io/trace"
- "google.golang.org/grpc/stats"
-)
-
-// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and
-// traces. Use with gRPC clients only.
-type ClientHandler struct {
- // StartOptions allows configuring the StartOptions used to create new spans.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindClient
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
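-
-// A usage sketch: install the handler as a gRPC dial option.
-//
-//	conn, err := grpc.Dial(address, grpc.WithStatsHandler(&ClientHandler{}))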
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = c.traceTagRPC(ctx, rti)
- ctx = c.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
deleted file mode 100644
index fb3c19d6b..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures recorded by ClientHandler:
-var (
- ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
- ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
- ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
- ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless)
- ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
-)
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ClientSentBytesPerRPCView = &view.View{
- Measure: ClientSentBytesPerRPC,
- Name: "grpc.io/client/sent_bytes_per_rpc",
- Description: "Distribution of bytes sent per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientReceivedBytesPerRPCView = &view.View{
- Measure: ClientReceivedBytesPerRPC,
- Name: "grpc.io/client/received_bytes_per_rpc",
- Description: "Distribution of bytes received per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientRoundtripLatencyView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/roundtrip_latency",
- Description: "Distribution of round-trip latency, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- // Purposely reuses the count from `ClientRoundtripLatency`, tagging
- // with method and status to result in ClientCompletedRpcs.
- ClientCompletedRPCsView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- Aggregation: view.Count(),
- }
-
- ClientStartedRPCsView = &view.View{
- Measure: ClientStartedRPCs,
- Name: "grpc.io/client/started_rpcs",
- Description: "Number of started client RPCs.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: view.Count(),
- }
-
- ClientSentMessagesPerRPCView = &view.View{
- Measure: ClientSentMessagesPerRPC,
- Name: "grpc.io/client/sent_messages_per_rpc",
- Description: "Distribution of sent messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientReceivedMessagesPerRPCView = &view.View{
- Measure: ClientReceivedMessagesPerRPC,
- Name: "grpc.io/client/received_messages_per_rpc",
- Description: "Distribution of received messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientServerLatencyView = &view.View{
- Measure: ClientServerLatency,
- Name: "grpc.io/client/server_latency",
- Description: "Distribution of server latency as viewed by client, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-)
-
-// DefaultClientViews are the default client views provided by this package.
-var DefaultClientViews = []*view.View{
- ClientSentBytesPerRPCView,
- ClientReceivedBytesPerRPCView,
- ClientRoundtripLatencyView,
- ClientCompletedRPCsView,
-}
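-
-// To start collecting data for these views, register them, for example:
-//
-//	if err := view.Register(DefaultClientViews...); err != nil {
-//		log.Fatal(err)
-//	}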
-
-// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
deleted file mode 100644
index b36349820..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "context"
- "time"
-
- "go.opencensus.io/tag"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the tag.Map populated by the application code and
-// serializes its tags into the gRPC metadata so they are sent to the server.
-func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Info("clientHandler.TagRPC called with nil info.")
- }
- return ctx
- }
-
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- ts := tag.FromContext(ctx)
- if ts != nil {
- encoded := tag.Encode(ts)
- ctx = stats.SetTags(ctx, encoded)
- }
-
- return context.WithValue(ctx, rpcDataKey, d)
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
deleted file mode 100644
index 1370323fb..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ocgrpc contains OpenCensus stats and trace
-// integrations for gRPC.
-//
-// Use ServerHandler for servers and ClientHandler for clients.
-package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go
deleted file mode 100644
index 8a53e0972..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
-
- "google.golang.org/grpc/stats"
-
- "go.opencensus.io/trace"
-)
-
-// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
-// traces. Use with gRPC servers.
-//
-// When installed (see Example), tracing metadata is read from inbound RPCs
-// by default. If no tracing metadata is present, or if the tracing metadata is
-// present but the SpanContext isn't sampled, then a new trace may be started
-// (as determined by Sampler).
-type ServerHandler struct {
- // IsPublicEndpoint may be set to true to always start a new trace around
- // each RPC. Any SpanContext in the RPC metadata will be added as a linked
- // span instead of making it the parent of the span created around the
- // server RPC.
- //
- // Be aware that if you leave this false (the default) on a public-facing
- // server, callers will be able to send tracing metadata in gRPC headers
- // and trigger traces in your backend.
- IsPublicEndpoint bool
-
-	// StartOptions to use for spans started around RPCs handled by this server.
- //
- // These will apply even if there is tracing metadata already
- // present on the inbound RPC but the SpanContext is not sampled. This
- // ensures that each service has some opportunity to be traced. If you would
- // like to not add any additional traces for this gRPC service, set:
- //
- // StartOptions.Sampler = trace.ProbabilitySampler(0.0)
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindServer
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
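-
-// A usage sketch: install the handler as a gRPC server option.
-//
-//	srv := grpc.NewServer(grpc.StatsHandler(&ServerHandler{}))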
-
-var _ stats.Handler = (*ServerHandler)(nil)
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = s.traceTagRPC(ctx, rti)
- ctx = s.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
deleted file mode 100644
index fe0e97108..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures recorded by ServerHandler:
-var (
- ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
- ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
- ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
-	ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent across all response messages per RPC.", stats.UnitBytes)
- ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless)
- ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
-)
-
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ServerReceivedBytesPerRPCView = &view.View{
- Name: "grpc.io/server/received_bytes_per_rpc",
- Description: "Distribution of received bytes per RPC, by method.",
- Measure: ServerReceivedBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerSentBytesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_bytes_per_rpc",
- Description: "Distribution of total sent bytes per RPC, by method.",
- Measure: ServerSentBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerLatencyView = &view.View{
- Name: "grpc.io/server/server_latency",
- Description: "Distribution of server latency in milliseconds, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerLatency,
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- // Purposely reuses the count from `ServerLatency`, tagging
- // with method and status to result in ServerCompletedRpcs.
- ServerCompletedRPCsView = &view.View{
- Name: "grpc.io/server/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus},
- Measure: ServerLatency,
- Aggregation: view.Count(),
- }
-
- ServerStartedRPCsView = &view.View{
- Measure: ServerStartedRPCs,
- Name: "grpc.io/server/started_rpcs",
- Description: "Number of started server RPCs.",
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: view.Count(),
- }
-
- ServerReceivedMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/received_messages_per_rpc",
- Description: "Distribution of messages received count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerReceivedMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ServerSentMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_messages_per_rpc",
- Description: "Distribution of messages sent count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerSentMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-)
-
-// DefaultServerViews are the default server views provided by this package.
-var DefaultServerViews = []*view.View{
- ServerReceivedBytesPerRPCView,
- ServerSentBytesPerRPCView,
- ServerLatencyView,
- ServerCompletedRPCsView,
-}
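-
-// As with the client views, register these to start collecting data:
-//
-//	if err := view.Register(DefaultServerViews...); err != nil {
-//		log.Fatal(err)
-//	}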
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
deleted file mode 100644
index afcef023a..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "time"
-
- "context"
-
- "go.opencensus.io/tag"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the metadata from the gRPC context, extracts the encoded
-// tags from it, creates a new tag.Map, and puts it into the returned context.
-func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Infof("opencensus: TagRPC called with nil info.")
- }
- return ctx
- }
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- propagated := h.extractPropagatedTags(ctx)
- ctx = tag.NewContext(ctx, propagated)
- ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
- return context.WithValue(ctx, rpcDataKey, d)
-}
-
-// extractPropagatedTags creates a new tag map containing the tags extracted from the
-// gRPC metadata.
-func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
- buf := stats.Tags(ctx)
- if buf == nil {
- return nil
- }
- propagated, err := tag.Decode(buf)
- if err != nil {
- if grpclog.V(2) {
- grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
- }
- return nil
- }
- return propagated
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
deleted file mode 100644
index 9cb27320c..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "context"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/metric/metricdata"
- ocstats "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
- "go.opencensus.io/trace"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-type grpcInstrumentationKey string
-
-// rpcData holds the instrumentation RPC data that is needed between the start
-// and end of a call. It holds the info that this package needs to keep track
-// of between the various gRPC events.
-type rpcData struct {
- // sentCount, sentBytes, recvCount and recvBytes have to be the first
- // words in order to be 64-bit aligned on 32-bit architectures.
- sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
-
- // startTime represents the time at which TagRPC was invoked at the
- // beginning of an RPC. It is an approximation of the time when the
- // application code invoked gRPC code.
- startTime time.Time
- method string
-}
-
-// The following variables define the default hard-coded auxiliary data used by
-// both the default GRPC client and GRPC server metrics.
-var (
- DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
- DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
-)
-
-// Server tags are applied to the context used to process each RPC, as well as
-// the measures at the end of each RPC.
-var (
- KeyServerMethod = tag.MustNewKey("grpc_server_method")
- KeyServerStatus = tag.MustNewKey("grpc_server_status")
-)
-
-// Client tags are applied to measures at the end of each RPC.
-var (
- KeyClientMethod = tag.MustNewKey("grpc_client_method")
- KeyClientStatus = tag.MustNewKey("grpc_client_status")
-)
-
-var (
- rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
-)
-
-func methodName(fullname string) string {
- return strings.TrimLeft(fullname, "/")
-}
-
-// statsHandleRPC processes the RPC events.
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
- switch st := s.(type) {
- case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
- // do nothing for header and trailer events
- case *stats.Begin:
- handleRPCBegin(ctx, st)
- case *stats.OutPayload:
- handleRPCOutPayload(ctx, st)
- case *stats.InPayload:
- handleRPCInPayload(ctx, st)
- case *stats.End:
- handleRPCEnd(ctx, st)
- default:
- grpclog.Infof("unexpected stats: %T", st)
- }
-}
-
-func handleRPCBegin(ctx context.Context, s *stats.Begin) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- // d is nil here and is dereferenced below; return early, as the
- // other handlers do.
- return
- }
-
- if s.IsClient() {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))),
- ocstats.WithMeasurements(ClientStartedRPCs.M(1)))
- } else {
- // Server-side starts are tagged with the server method key.
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(tag.Upsert(KeyServerMethod, methodName(d.method))),
- ocstats.WithMeasurements(ServerStartedRPCs.M(1)))
- }
-}
-}
-
-func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.sentBytes, int64(s.Length))
- atomic.AddInt64(&d.sentCount, 1)
-}
-
-func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.recvBytes, int64(s.Length))
- atomic.AddInt64(&d.recvCount, 1)
-}
-
-func handleRPCEnd(ctx context.Context, s *stats.End) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- elapsedTime := time.Since(d.startTime)
-
- var st string
- if s.Error != nil {
- s, ok := status.FromError(s.Error)
- if ok {
- st = statusCodeToString(s)
- }
- } else {
- st = "OK"
- }
-
- latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
- attachments := getSpanCtxAttachment(ctx)
- if s.Client {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(
- tag.Upsert(KeyClientMethod, methodName(d.method)),
- tag.Upsert(KeyClientStatus, st)),
- ocstats.WithAttachments(attachments),
- ocstats.WithMeasurements(
- ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ClientRoundtripLatency.M(latencyMillis)))
- } else {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(
- tag.Upsert(KeyServerStatus, st),
- ),
- ocstats.WithAttachments(attachments),
- ocstats.WithMeasurements(
- ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ServerLatency.M(latencyMillis)))
- }
-}
-
-func statusCodeToString(s *status.Status) string {
- // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
- switch c := s.Code(); c {
- case codes.OK:
- return "OK"
- case codes.Canceled:
- return "CANCELLED"
- case codes.Unknown:
- return "UNKNOWN"
- case codes.InvalidArgument:
- return "INVALID_ARGUMENT"
- case codes.DeadlineExceeded:
- return "DEADLINE_EXCEEDED"
- case codes.NotFound:
- return "NOT_FOUND"
- case codes.AlreadyExists:
- return "ALREADY_EXISTS"
- case codes.PermissionDenied:
- return "PERMISSION_DENIED"
- case codes.ResourceExhausted:
- return "RESOURCE_EXHAUSTED"
- case codes.FailedPrecondition:
- return "FAILED_PRECONDITION"
- case codes.Aborted:
- return "ABORTED"
- case codes.OutOfRange:
- return "OUT_OF_RANGE"
- case codes.Unimplemented:
- return "UNIMPLEMENTED"
- case codes.Internal:
- return "INTERNAL"
- case codes.Unavailable:
- return "UNAVAILABLE"
- case codes.DataLoss:
- return "DATA_LOSS"
- case codes.Unauthenticated:
- return "UNAUTHENTICATED"
- default:
- return "CODE_" + strconv.FormatInt(int64(c), 10)
- }
-}
-
-func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments {
- attachments := map[string]interface{}{}
- span := trace.FromContext(ctx)
- if span == nil {
- return attachments
- }
- spanCtx := span.SpanContext()
- if spanCtx.IsSampled() {
- attachments[metricdata.AttachmentKeySpanContext] = spanCtx
- }
- return attachments
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
deleted file mode 100644
index 61bc543d0..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
- "strings"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const traceContextKey = "grpc-trace-bin"
-
-// traceTagRPC creates a new trace span for the client side of the RPC.
-//
-// It returns ctx with the new trace span added and a serialization of the
-// SpanContext added to the outgoing gRPC metadata.
-func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSampler(c.StartOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
- traceContextBinary := propagation.Binary(span.SpanContext())
- return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
-}
-
-// traceTagRPC creates a new trace span for the server side of the RPC.
-//
-// It checks the incoming gRPC metadata in ctx for a SpanContext, and if
-// it finds one, uses that SpanContext as the parent context of the new span.
-//
-// It returns ctx, with the new trace span added.
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- md, _ := metadata.FromIncomingContext(ctx)
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- traceContext := md[traceContextKey]
- var (
- parent trace.SpanContext
- haveParent bool
- )
- if len(traceContext) > 0 {
- // Metadata with keys ending in -bin are actually binary. They are base64
- // encoded before being put on the wire, see:
- // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata
- traceContextBinary := []byte(traceContext[0])
- parent, haveParent = propagation.FromBinary(traceContextBinary)
- if haveParent && !s.IsPublicEndpoint {
- ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler),
- )
- return ctx
- }
- }
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler))
- if haveParent {
- span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
- }
- return ctx
-}
-
-func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {
- span := trace.FromContext(ctx)
- // TODO: compressed and uncompressed sizes are not populated in every message.
- switch rs := rs.(type) {
- case *stats.Begin:
- span.AddAttributes(
- trace.BoolAttribute("Client", rs.Client),
- trace.BoolAttribute("FailFast", rs.FailFast))
- case *stats.InPayload:
- span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength))
- case *stats.OutPayload:
- span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength))
- case *stats.End:
- if rs.Error != nil {
- s, ok := status.FromError(rs.Error)
- if ok {
- span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()})
- } else {
- span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()})
- }
- }
- span.End()
- }
-}
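On the client side, the tagging and trace/stats handling above are wired in through grpc.WithStatsHandler. A minimal sketch of user code, not part of the vendored tree (insecure credentials are for illustration only):

import (
	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func dial(addr string) (*grpc.ClientConn, error) {
	// Register the predefined client views so client measures are exported.
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		return nil, err
	}
	// ClientHandler implements google.golang.org/grpc/stats.Handler, so the
	// tag/trace/stats paths shown above run for every RPC on this connection.
	return grpc.Dial(addr,
		grpc.WithInsecure(), // illustrative; use real transport credentials
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
}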
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go
deleted file mode 100644
index da815b2a7..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/client.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "net/http"
- "net/http/httptrace"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// Transport is an http.RoundTripper that instruments all outgoing requests with
-// OpenCensus stats and tracing.
-//
-// The zero value is intended to be a useful default, but for
-// now it's recommended that you explicitly set Propagation, since the default
-// for this may change.
-type Transport struct {
- // Base may be set to wrap another http.RoundTripper that does the actual
- // requests. By default http.DefaultTransport is used.
- //
- // If base HTTP roundtripper implements CancelRequest,
- // the returned round tripper will be cancelable.
- Base http.RoundTripper
-
- // Propagation defines how traces are propagated. If unspecified, a default
- // (currently B3 format) will be used.
- Propagation propagation.HTTPFormat
-
- // StartOptions are applied to the span started by this Transport around each
- // request.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindClient
- // for spans started by this transport.
- StartOptions trace.StartOptions
-
- // GetStartOptions allows setting start options per request. If set,
- // StartOptions is ignored.
- GetStartOptions func(*http.Request) trace.StartOptions
-
- // FormatSpanName holds the function to use for generating the span name
- // from the information found in the outgoing HTTP Request. By default the
- // name equals the URL Path.
- FormatSpanName func(*http.Request) string
-
- // NewClientTrace may be set to a function allowing the current *trace.Span
- // to be annotated with HTTP request event information emitted by the
- // httptrace package.
- NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
-
- // TODO: Implement tag propagation for HTTP.
-}
-
-// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- rt := t.base()
- if isHealthEndpoint(req.URL.Path) {
- return rt.RoundTrip(req)
- }
- // TODO: remove excessive nesting of http.RoundTrippers here.
- format := t.Propagation
- if format == nil {
- format = defaultFormat
- }
- spanNameFormatter := t.FormatSpanName
- if spanNameFormatter == nil {
- spanNameFormatter = spanNameFromURL
- }
-
- startOpts := t.StartOptions
- if t.GetStartOptions != nil {
- startOpts = t.GetStartOptions(req)
- }
-
- rt = &traceTransport{
- base: rt,
- format: format,
- startOptions: trace.StartOptions{
- Sampler: startOpts.Sampler,
- SpanKind: trace.SpanKindClient,
- },
- formatSpanName: spanNameFormatter,
- newClientTrace: t.NewClientTrace,
- }
- rt = statsTransport{base: rt}
- return rt.RoundTrip(req)
-}
-
-func (t *Transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *Transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- cr.CancelRequest(req)
- }
-}
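In practice the Transport above is dropped into an http.Client. A minimal sketch of user code, not part of the vendored tree:

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

// newInstrumentedClient returns a client whose requests each get a client
// span plus stats. With Base unset, http.DefaultTransport does the I/O.
func newInstrumentedClient() *http.Client {
	return &http.Client{Transport: &ochttp.Transport{}}
}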
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
deleted file mode 100644
index 17142aabe..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "io"
- "net/http"
- "strconv"
- "sync"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
-)
-
-// statsTransport is an http.RoundTripper that collects stats for the outgoing requests.
-type statsTransport struct {
- base http.RoundTripper
-}
-
-// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
-func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- ctx, _ := tag.New(req.Context(),
- tag.Upsert(KeyClientHost, req.Host),
- tag.Upsert(Host, req.Host),
- tag.Upsert(KeyClientPath, req.URL.Path),
- tag.Upsert(Path, req.URL.Path),
- tag.Upsert(KeyClientMethod, req.Method),
- tag.Upsert(Method, req.Method))
- req = req.WithContext(ctx)
- track := &tracker{
- start: time.Now(),
- ctx: ctx,
- }
- if req.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- track.reqSize = -1
- } else if req.ContentLength > 0 {
- track.reqSize = req.ContentLength
- }
- stats.Record(ctx, ClientRequestCount.M(1))
-
- // Perform request.
- resp, err := t.base.RoundTrip(req)
-
- if err != nil {
- track.statusCode = http.StatusInternalServerError
- track.end()
- } else {
- track.statusCode = resp.StatusCode
- if req.Method != "HEAD" {
- track.respContentLength = resp.ContentLength
- }
- if resp.Body == nil {
- track.end()
- } else {
- track.body = resp.Body
- resp.Body = wrappedBody(track, resp.Body)
- }
- }
- return resp, err
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t statsTransport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base.(canceler); ok {
- cr.CancelRequest(req)
- }
-}
-
-type tracker struct {
- ctx context.Context
- respSize int64
- respContentLength int64
- reqSize int64
- start time.Time
- body io.ReadCloser
- statusCode int
- endOnce sync.Once
-}
-
-var _ io.ReadCloser = (*tracker)(nil)
-
-func (t *tracker) end() {
- t.endOnce.Do(func() {
- latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond)
- respSize := t.respSize
- if t.respSize == 0 && t.respContentLength > 0 {
- respSize = t.respContentLength
- }
- m := []stats.Measurement{
- ClientSentBytes.M(t.reqSize),
- ClientReceivedBytes.M(respSize),
- ClientRoundtripLatency.M(latencyMs),
- ClientLatency.M(latencyMs),
- ClientResponseBytes.M(t.respSize),
- }
- if t.reqSize >= 0 {
- m = append(m, ClientRequestBytes.M(t.reqSize))
- }
-
- stats.RecordWithTags(t.ctx, []tag.Mutator{
- tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)),
- tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)),
- }, m...)
- })
-}
-
-func (t *tracker) Read(b []byte) (int, error) {
- n, err := t.body.Read(b)
- t.respSize += int64(n)
- switch err {
- case nil:
- return n, nil
- case io.EOF:
- t.end()
- }
- return n, err
-}
-
-func (t *tracker) Close() error {
- // Invoking end on Close catches the case in which a read returned a
- // non-nil error other than io.EOF, so end was never called from Read.
- t.end()
- return t.body.Close()
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go
deleted file mode 100644
index 10e626b16..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ochttp provides OpenCensus instrumentation for the net/http package.
-//
-// For server instrumentation, see Handler. For client-side instrumentation,
-// see Transport.
-package ochttp // import "go.opencensus.io/plugin/ochttp"
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
deleted file mode 100644
index 9ad885219..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package b3 contains a propagation.HTTPFormat implementation
-// for B3 propagation. See https://github.com/openzipkin/b3-propagation
-// for more details.
-package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
-
-import (
- "encoding/hex"
- "net/http"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// B3 headers that OpenCensus understands.
-const (
- TraceIDHeader = "X-B3-TraceId"
- SpanIDHeader = "X-B3-SpanId"
- SampledHeader = "X-B3-Sampled"
-)
-
-// HTTPFormat implements propagation.HTTPFormat to propagate
-// traces in HTTP headers in B3 propagation format.
-// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
-// because there are additional fields not represented in the
-// OpenCensus span context. Spans created from the incoming
-// header will be the direct children of the client-side span.
-// Similarly, the receiver of the outgoing spans should use the
-// client-side span created by OpenCensus as the parent.
-type HTTPFormat struct{}
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// SpanContextFromRequest extracts a B3 span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
- if !ok {
- return trace.SpanContext{}, false
- }
- sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
- if !ok {
- return trace.SpanContext{}, false
- }
- sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
- return trace.SpanContext{
- TraceID: tid,
- SpanID: sid,
- TraceOptions: sampled,
- }, true
-}
-
-// ParseTraceID parses the value of the X-B3-TraceId header.
-func ParseTraceID(tid string) (trace.TraceID, bool) {
- if tid == "" {
- return trace.TraceID{}, false
- }
- b, err := hex.DecodeString(tid)
- if err != nil || len(b) > 16 {
- return trace.TraceID{}, false
- }
- var traceID trace.TraceID
- if len(b) <= 8 {
- // The lower 64-bits.
- start := 8 + (8 - len(b))
- copy(traceID[start:], b)
- } else {
- start := 16 - len(b)
- copy(traceID[start:], b)
- }
-
- return traceID, true
-}
-
-// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
-func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
- if sid == "" {
- return trace.SpanID{}, false
- }
- b, err := hex.DecodeString(sid)
- if err != nil || len(b) > 8 {
- return trace.SpanID{}, false
- }
- start := 8 - len(b)
- copy(spanID[start:], b)
- return spanID, true
-}
-
-// ParseSampled parses the value of the X-B3-Sampled header.
-func ParseSampled(sampled string) (trace.TraceOptions, bool) {
- switch sampled {
- case "true", "1":
- return trace.TraceOptions(1), true
- default:
- return trace.TraceOptions(0), false
- }
-}
-
-// SpanContextToRequest modifies the given request to include B3 headers.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
- req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
-
- var sampled string
- if sc.IsSampled() {
- sampled = "1"
- } else {
- sampled = "0"
- }
- req.Header.Set(SampledHeader, sampled)
-}
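To emit and accept these X-B3-* headers, the format is plugged into the Propagation field of the ochttp types. A minimal sketch of user code, not part of the vendored tree:

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

// Outgoing requests carry X-B3-TraceId, X-B3-SpanId and X-B3-Sampled.
var client = &http.Client{
	Transport: &ochttp.Transport{Propagation: &b3.HTTPFormat{}},
}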
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go
deleted file mode 100644
index 48d815249..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tracecontext contains an HTTP propagator for the W3C TraceContext
-// standard. See https://github.com/w3c/distributed-tracing for more information.
-package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
-
-import (
- "encoding/hex"
- "fmt"
- "net/http"
- "net/textproto"
- "regexp"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
- "go.opencensus.io/trace/tracestate"
-)
-
-const (
- supportedVersion = 0
- maxVersion = 254
- maxTracestateLen = 512
- traceparentHeader = "traceparent"
- tracestateHeader = "tracestate"
- trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$`
-)
-
-var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt)
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// HTTPFormat implements the TraceContext trace propagation format.
-type HTTPFormat struct{}
-
-// SpanContextFromRequest extracts a span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- tp, _ := getRequestHeader(req, traceparentHeader, false)
- ts, _ := getRequestHeader(req, tracestateHeader, true)
- return f.SpanContextFromHeaders(tp, ts)
-}
-
-// SpanContextFromHeaders extracts a span context from provided header values.
-func (f *HTTPFormat) SpanContextFromHeaders(tp string, ts string) (sc trace.SpanContext, ok bool) {
- if tp == "" {
- return trace.SpanContext{}, false
- }
- sections := strings.Split(tp, "-")
- if len(sections) < 4 {
- return trace.SpanContext{}, false
- }
-
- if len(sections[0]) != 2 {
- return trace.SpanContext{}, false
- }
- ver, err := hex.DecodeString(sections[0])
- if err != nil {
- return trace.SpanContext{}, false
- }
- version := int(ver[0])
- if version > maxVersion {
- return trace.SpanContext{}, false
- }
-
- if version == 0 && len(sections) != 4 {
- return trace.SpanContext{}, false
- }
-
- if len(sections[1]) != 32 {
- return trace.SpanContext{}, false
- }
- tid, err := hex.DecodeString(sections[1])
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], tid)
-
- if len(sections[2]) != 16 {
- return trace.SpanContext{}, false
- }
- sid, err := hex.DecodeString(sections[2])
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.SpanID[:], sid)
-
- opts, err := hex.DecodeString(sections[3])
- if err != nil || len(opts) < 1 {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(opts[0])
-
- // Don't allow all zero trace or span ID.
- if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} {
- return trace.SpanContext{}, false
- }
-
- sc.Tracestate = tracestateFromHeader(ts)
- return sc, true
-}
-
-// getRequestHeader returns a combined header field according to RFC 7230,
-// section 3.2.2.
-// If commaSeparated is true, multiple header fields with the same field name
-// will be combined using ",".
-// If no header is found with the given name, "ok" is false.
-// If more than one header is found with the given name while commaSeparated
-// is false, "ok" is false.
-func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) {
- v := req.Header[textproto.CanonicalMIMEHeaderKey(name)]
- switch len(v) {
- case 0:
- return "", false
- case 1:
- return v[0], true
- default:
- return strings.Join(v, ","), commaSeparated
- }
-}
-
-// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error.
-// Revisit to return additional boolean value to indicate parsing error when following issues
-// are resolved.
-// https://github.com/w3c/distributed-tracing/issues/172
-// https://github.com/w3c/distributed-tracing/issues/175
-func tracestateFromHeader(ts string) *tracestate.Tracestate {
- if ts == "" {
- return nil
- }
-
- var entries []tracestate.Entry
- pairs := strings.Split(ts, ",")
- hdrLenWithoutOWS := len(pairs) - 1 // Number of commas
- for _, pair := range pairs {
- matches := trimOWSRegExp.FindStringSubmatch(pair)
- if matches == nil {
- return nil
- }
- pair = matches[1]
- hdrLenWithoutOWS += len(pair)
- if hdrLenWithoutOWS > maxTracestateLen {
- return nil
- }
- kv := strings.Split(pair, "=")
- if len(kv) != 2 {
- return nil
- }
- entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]})
- }
- tsParsed, err := tracestate.New(nil, entries...)
- if err != nil {
- return nil
- }
-
- return tsParsed
-}
-
-func tracestateToHeader(sc trace.SpanContext) string {
- var pairs = make([]string, 0, len(sc.Tracestate.Entries()))
- if sc.Tracestate != nil {
- for _, entry := range sc.Tracestate.Entries() {
- pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "="))
- }
- h := strings.Join(pairs, ",")
-
- if h != "" && len(h) <= maxTracestateLen {
- return h
- }
- }
- return ""
-}
-
-// SpanContextToHeaders serializes the SpanContext to traceparent and tracestate headers.
-func (f *HTTPFormat) SpanContextToHeaders(sc trace.SpanContext) (tp string, ts string) {
- tp = fmt.Sprintf("%x-%x-%x-%x",
- []byte{supportedVersion},
- sc.TraceID[:],
- sc.SpanID[:],
- []byte{byte(sc.TraceOptions)})
- ts = tracestateToHeader(sc)
- return
-}
-
-// SpanContextToRequest modifies the given request to include traceparent and tracestate headers.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- tp, ts := f.SpanContextToHeaders(sc)
- req.Header.Set(traceparentHeader, tp)
- if ts != "" {
- req.Header.Set(tracestateHeader, ts)
- }
-}
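For a concrete picture of the wire format produced by SpanContextToHeaders, a small sketch of user code, not part of the vendored tree; the traceparent value has the shape 00-<32 hex trace-id>-<16 hex span-id>-<2 hex trace-options>:

import (
	"fmt"

	"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
	"go.opencensus.io/trace"
)

func printHeaders(sc trace.SpanContext) {
	f := &tracecontext.HTTPFormat{}
	tp, ts := f.SpanContextToHeaders(sc)
	// e.g. traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
	fmt.Println("traceparent:", tp)
	fmt.Println("tracestate:", ts) // empty unless sc.Tracestate is set
}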
diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go
deleted file mode 100644
index 5e6a34307..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/route.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "net/http"
-
- "go.opencensus.io/tag"
-)
-
-// SetRoute sets the http_server_route tag to the given value.
-// It's useful when an HTTP framework does not support the http.Handler
-// interface (so WithRouteTag is not an option) but still provides a way to
-// hook into the request flow.
-func SetRoute(ctx context.Context, route string) {
- if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok {
- a.t = append(a.t, tag.Upsert(KeyServerRoute, route))
- }
-}
-
-// WithRouteTag returns an http.Handler that records stats with the
-// http_server_route tag set to the given value.
-func WithRouteTag(handler http.Handler, route string) http.Handler {
- return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
- addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
- ctx, _ := tag.New(r.Context(), addRoute...)
- r = r.WithContext(ctx)
- handler.ServeHTTP(w, r)
- return addRoute
- })
-}
-
-// taggedHandlerFunc is an http.Handler that returns tags describing the
-// processing of the request. These tags will be recorded along with the
-// measures in this package at the end of the request.
-type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
-
-func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- tags := h(w, r)
- if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
- a.t = append(a.t, tags...)
- }
-}
-
-type addedTagsKey struct{}
-
-type addedTags struct {
- t []tag.Mutator
-}
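WithRouteTag is meant to wrap individual route handlers so that a low-cardinality route, not the raw URL path, lands on the recorded stats. A minimal sketch of user code, not part of the vendored tree (the route pattern and handler body are illustrative):

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func newMux() *http.ServeMux {
	mux := http.NewServeMux()
	// Requests under /users/ are recorded with http_server_route="/users/:id".
	mux.Handle("/users/", ochttp.WithRouteTag(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		}), "/users/:id"))
	return mux
}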
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
deleted file mode 100644
index f7c8434be..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/server.go
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "io"
- "net/http"
- "strconv"
- "sync"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// Handler is an http.Handler wrapper to instrument your HTTP server with
-// OpenCensus. It supports both stats and tracing.
-//
-// # Tracing
-//
-// This handler is aware of the incoming request's span, reading it from request
-// headers as configured using the Propagation field.
-// The extracted span can be accessed from the incoming request's
-// context.
-//
-// span := trace.FromContext(r.Context())
-//
-// The server span will be automatically ended at the end of ServeHTTP.
-type Handler struct {
- // Propagation defines how traces are propagated. If unspecified,
- // B3 propagation will be used.
- Propagation propagation.HTTPFormat
-
- // Handler is the handler used to handle the incoming request.
- Handler http.Handler
-
- // StartOptions are applied to the span started by this Handler around each
- // request.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindServer
- // for spans started by this transport.
- StartOptions trace.StartOptions
-
- // GetStartOptions allows setting start options per request. If set,
- // StartOptions is ignored.
- GetStartOptions func(*http.Request) trace.StartOptions
-
- // IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
- // servers. If true, any trace metadata set on the incoming request will
- // be added as a linked trace instead of being added as a parent of the
- // current trace.
- IsPublicEndpoint bool
-
- // FormatSpanName holds the function to use for generating the span name
- // from the information found in the incoming HTTP Request. By default the
- // name equals the URL Path.
- FormatSpanName func(*http.Request) string
-
- // IsHealthEndpoint holds the function to use for determining if the
- // incoming HTTP request should be considered a health check. This is in
- // addition to the private isHealthEndpoint func which may also indicate
- // tracing should be skipped.
- IsHealthEndpoint func(*http.Request) bool
-}
-
-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- var tags addedTags
- r, traceEnd := h.startTrace(w, r)
- defer traceEnd()
- w, statsEnd := h.startStats(w, r)
- defer statsEnd(&tags)
- handler := h.Handler
- if handler == nil {
- handler = http.DefaultServeMux
- }
- r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
- handler.ServeHTTP(w, r)
-}
-
-func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
- if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) {
- return r, func() {}
- }
- var name string
- if h.FormatSpanName == nil {
- name = spanNameFromURL(r)
- } else {
- name = h.FormatSpanName(r)
- }
- ctx := r.Context()
-
- startOpts := h.StartOptions
- if h.GetStartOptions != nil {
- startOpts = h.GetStartOptions(r)
- }
-
- var span *trace.Span
- sc, ok := h.extractSpanContext(r)
- if ok && !h.IsPublicEndpoint {
- ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
- trace.WithSampler(startOpts.Sampler),
- trace.WithSpanKind(trace.SpanKindServer))
- } else {
- ctx, span = trace.StartSpan(ctx, name,
- trace.WithSampler(startOpts.Sampler),
- trace.WithSpanKind(trace.SpanKindServer),
- )
- if ok {
- span.AddLink(trace.Link{
- TraceID: sc.TraceID,
- SpanID: sc.SpanID,
- Type: trace.LinkTypeParent,
- Attributes: nil,
- })
- }
- }
- span.AddAttributes(requestAttrs(r)...)
- if r.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- } else if r.ContentLength > 0 {
- span.AddMessageReceiveEvent(0, /* TODO: messageID */
- r.ContentLength, -1)
- }
- return r.WithContext(ctx), span.End
-}
-
-func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
- if h.Propagation == nil {
- return defaultFormat.SpanContextFromRequest(r)
- }
- return h.Propagation.SpanContextFromRequest(r)
-}
-
-func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
- ctx, _ := tag.New(r.Context(),
- tag.Upsert(Host, r.Host),
- tag.Upsert(Path, r.URL.Path),
- tag.Upsert(Method, r.Method))
- track := &trackingResponseWriter{
- start: time.Now(),
- ctx: ctx,
- writer: w,
- }
- if r.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- track.reqSize = -1
- } else if r.ContentLength > 0 {
- track.reqSize = r.ContentLength
- }
- stats.Record(ctx, ServerRequestCount.M(1))
- return track.wrappedResponseWriter(), track.end
-}
-
-type trackingResponseWriter struct {
- ctx context.Context
- reqSize int64
- respSize int64
- start time.Time
- statusCode int
- statusLine string
- endOnce sync.Once
- writer http.ResponseWriter
-}
-
-// Compile time assertion for ResponseWriter interface
-var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
-
-func (t *trackingResponseWriter) end(tags *addedTags) {
- t.endOnce.Do(func() {
- if t.statusCode == 0 {
- t.statusCode = 200
- }
-
- span := trace.FromContext(t.ctx)
- span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
- span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
-
- m := []stats.Measurement{
- ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
- ServerResponseBytes.M(t.respSize),
- }
- if t.reqSize >= 0 {
- m = append(m, ServerRequestBytes.M(t.reqSize))
- }
- allTags := make([]tag.Mutator, len(tags.t)+1)
- allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
- copy(allTags[1:], tags.t)
- stats.RecordWithTags(t.ctx, allTags, m...)
- })
-}
-
-func (t *trackingResponseWriter) Header() http.Header {
- return t.writer.Header()
-}
-
-func (t *trackingResponseWriter) Write(data []byte) (int, error) {
- n, err := t.writer.Write(data)
- t.respSize += int64(n)
- // Add message event for request bytes sent.
- span := trace.FromContext(t.ctx)
- span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
- return n, err
-}
-
-func (t *trackingResponseWriter) WriteHeader(statusCode int) {
- t.writer.WriteHeader(statusCode)
- t.statusCode = statusCode
- t.statusLine = http.StatusText(t.statusCode)
-}
-
-// wrappedResponseWriter returns a wrapped version of the original
-// ResponseWriter and only implements the same combination of additional
-// interfaces as the original.
-// This implementation is based on https://github.com/felixge/httpsnoop.
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
- var (
- hj, i0 = t.writer.(http.Hijacker)
- cn, i1 = t.writer.(http.CloseNotifier)
- pu, i2 = t.writer.(http.Pusher)
- fl, i3 = t.writer.(http.Flusher)
- rf, i4 = t.writer.(io.ReaderFrom)
- )
-
- switch {
- case !i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- }{t}
- case !i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- io.ReaderFrom
- }{t, rf}
- case !i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Flusher
- }{t, fl}
- case !i0 && !i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- }{t, fl, rf}
- case !i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- }{t, pu}
- case !i0 && !i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- io.ReaderFrom
- }{t, pu, rf}
- case !i0 && !i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- }{t, pu, fl}
- case !i0 && !i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, pu, fl, rf}
- case !i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- }{t, cn}
- case !i0 && i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- }{t, cn, rf}
- case !i0 && i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Flusher
- }{t, cn, fl}
- case !i0 && i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Flusher
- io.ReaderFrom
- }{t, cn, fl, rf}
- case !i0 && i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- }{t, cn, pu}
- case !i0 && i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- io.ReaderFrom
- }{t, cn, pu, rf}
- case !i0 && i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- http.Flusher
- }{t, cn, pu, fl}
- case !i0 && i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, cn, pu, fl, rf}
- case i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- }{t, hj}
- case i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- }{t, hj, rf}
- case i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Flusher
- }{t, hj, fl}
- case i0 && !i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Flusher
- io.ReaderFrom
- }{t, hj, fl, rf}
- case i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- }{t, hj, pu}
- case i0 && !i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- io.ReaderFrom
- }{t, hj, pu, rf}
- case i0 && !i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- http.Flusher
- }{t, hj, pu, fl}
- case i0 && !i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, hj, pu, fl, rf}
- case i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- }{t, hj, cn}
- case i0 && i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- io.ReaderFrom
- }{t, hj, cn, rf}
- case i0 && i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Flusher
- }{t, hj, cn, fl}
- case i0 && i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Flusher
- io.ReaderFrom
- }{t, hj, cn, fl, rf}
- case i0 && i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- }{t, hj, cn, pu}
- case i0 && i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- io.ReaderFrom
- }{t, hj, cn, pu, rf}
- case i0 && i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- http.Flusher
- }{t, hj, cn, pu, fl}
- case i0 && i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, hj, cn, pu, fl, rf}
- default:
- return struct {
- http.ResponseWriter
- }{t}
- }
-}
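The Handler above is the server-side entry point; wrapping an existing mux is enough to get a span and the server stats for every request. A minimal sketch of user code, not part of the vendored tree (address and handler body are illustrative):

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	// Each request is traced and measured; the host/path/method tags are
	// attached by startStats above.
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}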
diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
deleted file mode 100644
index 05c6c56cc..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "crypto/tls"
- "net/http"
- "net/http/httptrace"
- "strings"
-
- "go.opencensus.io/trace"
-)
-
-type spanAnnotator struct {
- sp *trace.Span
-}
-
-// TODO: Remove NewSpanAnnotator at the next release.
-
-// NewSpanAnnotator returns an httptrace.ClientTrace which annotates
-// all emitted httptrace events on the provided Span.
-//
-// Deprecated: Use NewSpanAnnotatingClientTrace instead.
-func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace {
- return NewSpanAnnotatingClientTrace(r, s)
-}
-
-// NewSpanAnnotatingClientTrace returns an httptrace.ClientTrace which annotates
-// all emitted httptrace events on the provided Span.
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
- sa := spanAnnotator{sp: s}
-
- return &httptrace.ClientTrace{
- GetConn: sa.getConn,
- GotConn: sa.gotConn,
- PutIdleConn: sa.putIdleConn,
- GotFirstResponseByte: sa.gotFirstResponseByte,
- Got100Continue: sa.got100Continue,
- DNSStart: sa.dnsStart,
- DNSDone: sa.dnsDone,
- ConnectStart: sa.connectStart,
- ConnectDone: sa.connectDone,
- TLSHandshakeStart: sa.tlsHandshakeStart,
- TLSHandshakeDone: sa.tlsHandshakeDone,
- WroteHeaders: sa.wroteHeaders,
- Wait100Continue: sa.wait100Continue,
- WroteRequest: sa.wroteRequest,
- }
-}
-
-func (s spanAnnotator) getConn(hostPort string) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
- }
- s.sp.Annotate(attrs, "GetConn")
-}
-
-func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
- attrs := []trace.Attribute{
- trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
- trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle),
- }
- if info.WasIdle {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
- }
- s.sp.Annotate(attrs, "GotConn")
-}
-
-// PutIdleConn implements a httptrace.ClientTrace hook
-func (s spanAnnotator) putIdleConn(err error) {
- var attrs []trace.Attribute
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.put_idle_connection.error", err.Error()))
- }
- s.sp.Annotate(attrs, "PutIdleConn")
-}
-
-func (s spanAnnotator) gotFirstResponseByte() {
- s.sp.Annotate(nil, "GotFirstResponseByte")
-}
-
-func (s spanAnnotator) got100Continue() {
- s.sp.Annotate(nil, "Got100Continue")
-}
-
-func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.dns_start.host", info.Host),
- }
- s.sp.Annotate(attrs, "DNSStart")
-}
-
-func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
- var addrs []string
- for _, addr := range info.Addrs {
- addrs = append(addrs, addr.String())
- }
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
- }
- if info.Err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
- }
- s.sp.Annotate(attrs, "DNSDone")
-}
-
-func (s spanAnnotator) connectStart(network, addr string) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.connect_start.network", network),
- trace.StringAttribute("httptrace.connect_start.addr", addr),
- }
- s.sp.Annotate(attrs, "ConnectStart")
-}
-
-func (s spanAnnotator) connectDone(network, addr string, err error) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.connect_done.network", network),
- trace.StringAttribute("httptrace.connect_done.addr", addr),
- }
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.connect_done.error", err.Error()))
- }
- s.sp.Annotate(attrs, "ConnectDone")
-}
-
-func (s spanAnnotator) tlsHandshakeStart() {
- s.sp.Annotate(nil, "TLSHandshakeStart")
-}
-
-func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
- var attrs []trace.Attribute
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error()))
- }
- s.sp.Annotate(attrs, "TLSHandshakeDone")
-}
-
-func (s spanAnnotator) wroteHeaders() {
- s.sp.Annotate(nil, "WroteHeaders")
-}
-
-func (s spanAnnotator) wait100Continue() {
- s.sp.Annotate(nil, "Wait100Continue")
-}
-
-func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
- var attrs []trace.Attribute
- if info.Err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error()))
- }
- s.sp.Annotate(attrs, "WroteRequest")
-}
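This annotator is activated through the Transport.NewClientTrace hook, whose signature it matches. A minimal sketch of user code, not part of the vendored tree:

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

// Every request made through this client gets DNS/connect/TLS annotations
// on its client span.
var client = &http.Client{
	Transport: &ochttp.Transport{
		NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
	},
}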
diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go
deleted file mode 100644
index ee3729040..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/stats.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// Deprecated: client HTTP measures.
-var (
- // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect.
- ClientRequestCount = stats.Int64(
- "opencensus.io/http/client/request_count",
- "Number of HTTP requests started",
- stats.UnitDimensionless)
- // Deprecated: Use ClientSentBytes.
- ClientRequestBytes = stats.Int64(
- "opencensus.io/http/client/request_bytes",
- "HTTP request body size if set as ContentLength (uncompressed)",
- stats.UnitBytes)
- // Deprecated: Use ClientReceivedBytes.
- ClientResponseBytes = stats.Int64(
- "opencensus.io/http/client/response_bytes",
- "HTTP response body size (uncompressed)",
- stats.UnitBytes)
- // Deprecated: Use ClientRoundtripLatency.
- ClientLatency = stats.Float64(
- "opencensus.io/http/client/latency",
- "End-to-end latency",
- stats.UnitMilliseconds)
-)
-
-// The following client HTTP measures are supported for use in custom views.
-var (
- ClientSentBytes = stats.Int64(
- "opencensus.io/http/client/sent_bytes",
- "Total bytes sent in request body (not including headers)",
- stats.UnitBytes,
- )
- ClientReceivedBytes = stats.Int64(
- "opencensus.io/http/client/received_bytes",
- "Total bytes received in response bodies (not including headers but including error responses with bodies)",
- stats.UnitBytes,
- )
- ClientRoundtripLatency = stats.Float64(
- "opencensus.io/http/client/roundtrip_latency",
- "Time between first byte of request headers sent to last byte of response received, or terminal error",
- stats.UnitMilliseconds,
- )
-)
-
-// The following server HTTP measures are supported for use in custom views:
-var (
- ServerRequestCount = stats.Int64(
- "opencensus.io/http/server/request_count",
- "Number of HTTP requests started",
- stats.UnitDimensionless)
- ServerRequestBytes = stats.Int64(
- "opencensus.io/http/server/request_bytes",
- "HTTP request body size if set as ContentLength (uncompressed)",
- stats.UnitBytes)
- ServerResponseBytes = stats.Int64(
- "opencensus.io/http/server/response_bytes",
- "HTTP response body size (uncompressed)",
- stats.UnitBytes)
- ServerLatency = stats.Float64(
- "opencensus.io/http/server/latency",
- "End-to-end latency",
- stats.UnitMilliseconds)
-)
-
-// The following tags are applied to stats recorded by this package. Host, Path
-// and Method are applied to all measures. StatusCode is not applied to
-// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
-var (
- // Host is the value of the HTTP Host header.
- //
- // The value of this tag can be controlled by the HTTP client, so you need
- // to watch out for potentially generating high-cardinality labels in your
- // metrics backend if you use this tag in views.
- Host = tag.MustNewKey("http.host")
-
- // StatusCode is the numeric HTTP response status code,
- // or "error" if a transport error occurred and no status code was read.
- StatusCode = tag.MustNewKey("http.status")
-
- // Path is the URL path (not including query string) in the request.
- //
- // The value of this tag can be controlled by the HTTP client, so you need
- // to watch out for potentially generating high-cardinality labels in your
- // metrics backend if you use this tag in views.
- Path = tag.MustNewKey("http.path")
-
- // Method is the HTTP method of the request, capitalized (GET, POST, etc.).
- Method = tag.MustNewKey("http.method")
-
-	// KeyServerRoute is a low-cardinality string representing the logical
-	// handler of the request. This is usually the pattern registered on a
-	// ServeMux (or a similar routing string).
- KeyServerRoute = tag.MustNewKey("http_server_route")
-)
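
For reference, a minimal sketch of how these tag keys combine with the measures above in a custom view. The view name "example.com/http/server/latency_by_route" and the choice of keys are assumptions for illustration, not part of the removed file.

```go
package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	// Key the latency distribution by the low-cardinality route tag (plus
	// method) rather than by raw path, to keep label cardinality bounded.
	latencyByRoute := &view.View{
		Name:        "example.com/http/server/latency_by_route",
		Measure:     ochttp.ServerLatency,
		Aggregation: ochttp.DefaultLatencyDistribution,
		TagKeys:     []tag.Key{ochttp.KeyServerRoute, ochttp.Method},
	}
	if err := view.Register(latencyByRoute); err != nil {
		log.Fatalf("failed to register view: %v", err)
	}
}
```
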
-
-// Client tag keys.
-var (
-	// KeyClientMethod is the HTTP method, capitalized (e.g. GET, POST, PUT, DELETE).
- KeyClientMethod = tag.MustNewKey("http_client_method")
- // KeyClientPath is the URL path (not including query string).
- KeyClientPath = tag.MustNewKey("http_client_path")
-	// KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received.
- KeyClientStatus = tag.MustNewKey("http_client_status")
- // KeyClientHost is the value of the request Host header.
- KeyClientHost = tag.MustNewKey("http_client_host")
-)
-
-// Default distributions used by views in this package.
-var (
- DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
-)
-
-// The following are convenience views for the client measures in this package.
-// You still need to register these views for data to actually be collected.
-var (
- ClientSentBytesDistribution = &view.View{
- Name: "opencensus.io/http/client/sent_bytes",
- Measure: ClientSentBytes,
- Aggregation: DefaultSizeDistribution,
- Description: "Total bytes sent in request body (not including headers), by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-
- ClientReceivedBytesDistribution = &view.View{
- Name: "opencensus.io/http/client/received_bytes",
- Measure: ClientReceivedBytes,
- Aggregation: DefaultSizeDistribution,
- Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-
- ClientRoundtripLatencyDistribution = &view.View{
- Name: "opencensus.io/http/client/roundtrip_latency",
- Measure: ClientRoundtripLatency,
- Aggregation: DefaultLatencyDistribution,
- Description: "End-to-end latency, by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-
- ClientCompletedCount = &view.View{
- Name: "opencensus.io/http/client/completed_count",
- Measure: ClientRoundtripLatency,
- Aggregation: view.Count(),
- Description: "Count of completed requests, by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-)
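
As the comment above notes, none of these views collect data until registered. A short sketch of registering all four at startup, assuming the usual go.opencensus.io import paths:

```go
package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Measurements recorded by ochttp are dropped until these are registered.
	if err := view.Register(
		ochttp.ClientSentBytesDistribution,
		ochttp.ClientReceivedBytesDistribution,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ClientCompletedCount,
	); err != nil {
		log.Fatal(err)
	}
}
```
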
-
-// Deprecated: these are the old client views; replacements are noted on each.
-var (
- // Deprecated: No direct replacement, but see ClientCompletedCount.
- ClientRequestCountView = &view.View{
- Name: "opencensus.io/http/client/request_count",
- Description: "Count of HTTP requests started",
- Measure: ClientRequestCount,
- Aggregation: view.Count(),
- }
-
- // Deprecated: Use ClientSentBytesDistribution.
- ClientRequestBytesView = &view.View{
- Name: "opencensus.io/http/client/request_bytes",
- Description: "Size distribution of HTTP request body",
- Measure: ClientSentBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- // Deprecated: Use ClientReceivedBytesDistribution instead.
- ClientResponseBytesView = &view.View{
- Name: "opencensus.io/http/client/response_bytes",
- Description: "Size distribution of HTTP response body",
- Measure: ClientReceivedBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- // Deprecated: Use ClientRoundtripLatencyDistribution instead.
- ClientLatencyView = &view.View{
- Name: "opencensus.io/http/client/latency",
- Description: "Latency distribution of HTTP requests",
- Measure: ClientRoundtripLatency,
- Aggregation: DefaultLatencyDistribution,
- }
-
- // Deprecated: Use ClientCompletedCount instead.
- ClientRequestCountByMethod = &view.View{
- Name: "opencensus.io/http/client/request_count_by_method",
- Description: "Client request count by HTTP method",
- TagKeys: []tag.Key{Method},
- Measure: ClientSentBytes,
- Aggregation: view.Count(),
- }
-
- // Deprecated: Use ClientCompletedCount instead.
- ClientResponseCountByStatusCode = &view.View{
- Name: "opencensus.io/http/client/response_count_by_status_code",
- Description: "Client response count by status code",
- TagKeys: []tag.Key{StatusCode},
- Measure: ClientRoundtripLatency,
- Aggregation: view.Count(),
- }
-)
-
-// The following are convenience views for the server measures in this package.
-// You still need to register these views for data to actually be collected.
-var (
- ServerRequestCountView = &view.View{
- Name: "opencensus.io/http/server/request_count",
- Description: "Count of HTTP requests started",
- Measure: ServerRequestCount,
- Aggregation: view.Count(),
- }
-
- ServerRequestBytesView = &view.View{
- Name: "opencensus.io/http/server/request_bytes",
- Description: "Size distribution of HTTP request body",
- Measure: ServerRequestBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ServerResponseBytesView = &view.View{
- Name: "opencensus.io/http/server/response_bytes",
- Description: "Size distribution of HTTP response body",
- Measure: ServerResponseBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ServerLatencyView = &view.View{
- Name: "opencensus.io/http/server/latency",
- Description: "Latency distribution of HTTP requests",
- Measure: ServerLatency,
- Aggregation: DefaultLatencyDistribution,
- }
-
- ServerRequestCountByMethod = &view.View{
- Name: "opencensus.io/http/server/request_count_by_method",
- Description: "Server request count by HTTP method",
- TagKeys: []tag.Key{Method},
- Measure: ServerRequestCount,
- Aggregation: view.Count(),
- }
-
- ServerResponseCountByStatusCode = &view.View{
- Name: "opencensus.io/http/server/response_count_by_status_code",
- Description: "Server response count by status code",
- TagKeys: []tag.Key{StatusCode},
- Measure: ServerLatency,
- Aggregation: view.Count(),
- }
-)
-
-// DefaultClientViews are the default client views provided by this package.
-// Deprecated: No replacement. Register the views you would like individually.
-var DefaultClientViews = []*view.View{
- ClientRequestCountView,
- ClientRequestBytesView,
- ClientResponseBytesView,
- ClientLatencyView,
- ClientRequestCountByMethod,
- ClientResponseCountByStatusCode,
-}
-
-// DefaultServerViews are the default server views provided by this package.
-// Deprecated: No replacement. Register the views you would like individually.
-var DefaultServerViews = []*view.View{
- ServerRequestCountView,
- ServerRequestBytesView,
- ServerResponseBytesView,
- ServerLatencyView,
- ServerRequestCountByMethod,
- ServerResponseCountByStatusCode,
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go
deleted file mode 100644
index ed3a5db56..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/trace.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "io"
- "net/http"
- "net/http/httptrace"
-
- "go.opencensus.io/plugin/ochttp/propagation/b3"
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// TODO(jbd): Add godoc examples.
-
-var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{}
-
-// Attributes recorded on the span for the requests.
-// Only trace exporters will need them.
-const (
- HostAttribute = "http.host"
- MethodAttribute = "http.method"
- PathAttribute = "http.path"
- URLAttribute = "http.url"
- UserAgentAttribute = "http.user_agent"
- StatusCodeAttribute = "http.status_code"
-)
-
-type traceTransport struct {
- base http.RoundTripper
- startOptions trace.StartOptions
- format propagation.HTTPFormat
- formatSpanName func(*http.Request) string
- newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
-}
-
-// TODO(jbd): Add message events for request and response size.
-
-// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers.
-// The created span can follow a parent span, if a parent is present in
-// the request's context.
-func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- name := t.formatSpanName(req)
- // TODO(jbd): Discuss whether we want to prefix
- // outgoing requests with Sent.
- ctx, span := trace.StartSpan(req.Context(), name,
- trace.WithSampler(t.startOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindClient))
-
- if t.newClientTrace != nil {
- req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
- } else {
- req = req.WithContext(ctx)
- }
-
- if t.format != nil {
- // SpanContextToRequest will modify its Request argument, which is
- // contrary to the contract for http.RoundTripper, so we need to
- // pass it a copy of the Request.
- // However, the Request struct itself was already copied by
- // the WithContext calls above and so we just need to copy the header.
- header := make(http.Header)
- for k, v := range req.Header {
- header[k] = v
- }
- req.Header = header
- t.format.SpanContextToRequest(span.SpanContext(), req)
- }
-
- span.AddAttributes(requestAttrs(req)...)
- resp, err := t.base.RoundTrip(req)
- if err != nil {
- span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
- span.End()
- return resp, err
- }
-
- span.AddAttributes(responseAttrs(resp)...)
- span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
-
- // span.End() will be invoked after
- // a read from resp.Body returns io.EOF or when
- // resp.Body.Close() is invoked.
- bt := &bodyTracker{rc: resp.Body, span: span}
- resp.Body = wrappedBody(bt, resp.Body)
- return resp, err
-}
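
traceTransport itself is unexported; callers normally reach it through the package's exported ochttp.Transport wrapper (defined elsewhere in this package, not shown in this diff). A hedged sketch of typical client-side usage:

```go
package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	client := &http.Client{
		// Base defaults to http.DefaultTransport when nil; FormatSpanName
		// overrides the default span name (the URL path).
		Transport: &ochttp.Transport{
			FormatSpanName: func(r *http.Request) string { return "HTTP " + r.URL.Path },
		},
	}
	resp, err := client.Get("https://example.com/")
	if err == nil {
		// Closing (or fully reading) the body ends the client span; see
		// bodyTracker below.
		resp.Body.Close()
	}
}
```
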
-
-// bodyTracker wraps a response.Body and invokes
-// trace.EndSpan on encountering io.EOF on reading
-// the body of the original response.
-type bodyTracker struct {
- rc io.ReadCloser
- span *trace.Span
-}
-
-var _ io.ReadCloser = (*bodyTracker)(nil)
-
-func (bt *bodyTracker) Read(b []byte) (int, error) {
- n, err := bt.rc.Read(b)
-
- switch err {
- case nil:
- return n, nil
- case io.EOF:
- bt.span.End()
- default:
- // For all other errors, set the span status
- bt.span.SetStatus(trace.Status{
-			// Code 2 is trace.StatusCodeUnknown.
- Code: 2,
- Message: err.Error(),
- })
- }
- return n, err
-}
-
-func (bt *bodyTracker) Close() error {
-	// Ending the span on Close catches the cases in which a read
-	// returned a non-nil error: the span status was set, but the
-	// span was never ended.
- bt.span.End()
- return bt.rc.Close()
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *traceTransport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base.(canceler); ok {
- cr.CancelRequest(req)
- }
-}
-
-func spanNameFromURL(req *http.Request) string {
- return req.URL.Path
-}
-
-func requestAttrs(r *http.Request) []trace.Attribute {
- userAgent := r.UserAgent()
-
- attrs := make([]trace.Attribute, 0, 5)
- attrs = append(attrs,
- trace.StringAttribute(PathAttribute, r.URL.Path),
- trace.StringAttribute(URLAttribute, r.URL.String()),
- trace.StringAttribute(HostAttribute, r.Host),
- trace.StringAttribute(MethodAttribute, r.Method),
- )
-
- if userAgent != "" {
- attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent))
- }
-
- return attrs
-}
-
-func responseAttrs(resp *http.Response) []trace.Attribute {
- return []trace.Attribute{
- trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)),
- }
-}
-
-// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
-// represents the outcome as closely as possible.
-func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
- var code int32
- if httpStatusCode < 200 || httpStatusCode >= 400 {
- code = trace.StatusCodeUnknown
- }
- switch httpStatusCode {
- case 499:
- code = trace.StatusCodeCancelled
- case http.StatusBadRequest:
- code = trace.StatusCodeInvalidArgument
- case http.StatusUnprocessableEntity:
- code = trace.StatusCodeInvalidArgument
- case http.StatusGatewayTimeout:
- code = trace.StatusCodeDeadlineExceeded
- case http.StatusNotFound:
- code = trace.StatusCodeNotFound
- case http.StatusForbidden:
- code = trace.StatusCodePermissionDenied
- case http.StatusUnauthorized: // 401 is actually unauthenticated.
- code = trace.StatusCodeUnauthenticated
- case http.StatusTooManyRequests:
- code = trace.StatusCodeResourceExhausted
- case http.StatusNotImplemented:
- code = trace.StatusCodeUnimplemented
- case http.StatusServiceUnavailable:
- code = trace.StatusCodeUnavailable
- case http.StatusOK:
- code = trace.StatusCodeOK
- case http.StatusConflict:
- code = trace.StatusCodeAlreadyExists
- }
-
- return trace.Status{Code: code, Message: codeToStr[code]}
-}
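
Note that the statusLine argument is unused in the body above; the message always comes from codeToStr. A small illustration of the mapping, assuming the standard net/http status constants:

```go
package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	st := ochttp.TraceStatus(http.StatusNotFound, "404 Not Found")
	fmt.Println(st.Code, st.Message) // 5 NOT_FOUND

	// An unmapped 4xx falls through to StatusCodeUnknown.
	st = ochttp.TraceStatus(http.StatusTeapot, "418 I'm a teapot")
	fmt.Println(st.Code, st.Message) // 2 UNKNOWN
}
```
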
-
-var codeToStr = map[int32]string{
- trace.StatusCodeOK: `OK`,
- trace.StatusCodeCancelled: `CANCELLED`,
- trace.StatusCodeUnknown: `UNKNOWN`,
- trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`,
- trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`,
- trace.StatusCodeNotFound: `NOT_FOUND`,
- trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`,
- trace.StatusCodePermissionDenied: `PERMISSION_DENIED`,
- trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`,
- trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`,
- trace.StatusCodeAborted: `ABORTED`,
- trace.StatusCodeOutOfRange: `OUT_OF_RANGE`,
- trace.StatusCodeUnimplemented: `UNIMPLEMENTED`,
- trace.StatusCodeInternal: `INTERNAL`,
- trace.StatusCodeUnavailable: `UNAVAILABLE`,
- trace.StatusCodeDataLoss: `DATA_LOSS`,
- trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`,
-}
-
-func isHealthEndpoint(path string) bool {
- // Health checking is pretty frequent and
- // traces collected for health endpoints
- // can be extremely noisy and expensive.
- // Disable canonical health checking endpoints
- // like /healthz and /_ah/health for now.
- if path == "/healthz" || path == "/_ah/health" {
- return true
- }
- return false
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
deleted file mode 100644
index 7d75cae2b..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "io"
-)
-
-// wrappedBody returns a wrapped version of the original
-// Body and only implements the same combination of additional
-// interfaces as the original.
-func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser {
-	// Preserve io.Writer if (and only if) the original body implements it.
-	if wr, ok := body.(io.Writer); ok {
-		return struct {
-			io.ReadCloser
-			io.Writer
-		}{wrapper, wr}
-	}
-	return struct {
-		io.ReadCloser
-	}{wrapper}
-}
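
A test-style sketch of the contract this function enforces, written as if inside package ochttp (rwc is a hypothetical helper type, not part of the removed file):

```go
package ochttp

import (
	"io"
	"strings"
	"testing"
)

// rwc is a ReadCloser that also implements io.Writer.
type rwc struct{ io.ReadCloser }

func (rwc) Write(p []byte) (int, error) { return len(p), nil }

func TestWrappedBodyPreservesWriter(t *testing.T) {
	plain := io.NopCloser(strings.NewReader("x"))
	if _, ok := wrappedBody(plain, plain).(io.Writer); ok {
		t.Error("plain body must not gain io.Writer")
	}
	if _, ok := wrappedBody(plain, rwc{plain}).(io.Writer); !ok {
		t.Error("writable body must keep io.Writer")
	}
}
```
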
diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go
deleted file mode 100644
index b1764e1d3..000000000
--- a/vendor/go.opencensus.io/resource/resource.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package resource provides functionality for resources, which capture
-// identifying information about the entities for which signals are exported.
-package resource
-
-import (
- "context"
- "fmt"
- "os"
- "regexp"
- "sort"
- "strconv"
- "strings"
-)
-
-// Environment variables used by FromEnv to decode a resource.
-const (
- EnvVarType = "OC_RESOURCE_TYPE"
- EnvVarLabels = "OC_RESOURCE_LABELS"
-)
-
-// Resource describes an entity about which identifying information and metadata is exposed.
-// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace.
-type Resource struct {
- Type string
- Labels map[string]string
-}
-
-// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
-func EncodeLabels(labels map[string]string) string {
- sortedKeys := make([]string, 0, len(labels))
- for k := range labels {
- sortedKeys = append(sortedKeys, k)
- }
- sort.Strings(sortedKeys)
-
- s := ""
- for i, k := range sortedKeys {
- if i > 0 {
- s += ","
- }
- s += k + "=" + strconv.Quote(labels[k])
- }
- return s
-}
-
-var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
-
-// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
-// A list of labels of the form `key1="value1",key2="value2",...` is accepted.
-// Domain names and paths are accepted as label keys.
-// Most users will want to use FromEnv instead.
-func DecodeLabels(s string) (map[string]string, error) {
- m := map[string]string{}
- // Ensure a trailing comma, which allows us to keep the regex simpler
- s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
-
- for len(s) > 0 {
- match := labelRegex.FindStringSubmatch(s)
- if len(match) == 0 {
- return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
- }
-		// Group 2 always includes the surrounding quotes, so it can be
-		// unquoted directly.
-		v, err := strconv.Unquote(match[2])
-		if err != nil {
-			return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
-		}
- m[match[1]] = v
-
- s = s[len(match[0]):]
- }
- return m, nil
-}
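
A round-trip sketch of the two helpers, assuming the go.opencensus.io/resource import path:

```go
package main

import (
	"fmt"

	"go.opencensus.io/resource"
)

func main() {
	labels := map[string]string{"k8s.io/namespace": "default", "k8s.io/pod.name": "web-0"}

	s := resource.EncodeLabels(labels)
	fmt.Println(s) // k8s.io/namespace="default",k8s.io/pod.name="web-0" (keys sorted)

	decoded, err := resource.DecodeLabels(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded["k8s.io/pod.name"]) // web-0
}
```
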
-
-// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
-// and OC_RESOURCE_LABELS environment variables.
-func FromEnv(context.Context) (*Resource, error) {
- res := &Resource{
- Type: strings.TrimSpace(os.Getenv(EnvVarType)),
- }
- labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
- if labels == "" {
- return res, nil
- }
- var err error
- if res.Labels, err = DecodeLabels(labels); err != nil {
- return nil, err
- }
- return res, nil
-}
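
A minimal sketch of FromEnv in action; the environment values are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	os.Setenv("OC_RESOURCE_TYPE", "k8s.io/container")
	os.Setenv("OC_RESOURCE_LABELS", `container.name="app",namespace="default"`)

	res, err := resource.FromEnv(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Type, res.Labels["container.name"]) // k8s.io/container app
}
```
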
-
-var _ Detector = FromEnv
-
-// merge resource information from b into a. In case of a collision, a takes precedence.
-func merge(a, b *Resource) *Resource {
- if a == nil {
- return b
- }
- if b == nil {
- return a
- }
- res := &Resource{
- Type: a.Type,
- Labels: map[string]string{},
- }
- if res.Type == "" {
- res.Type = b.Type
- }
- for k, v := range b.Labels {
- res.Labels[k] = v
- }
- // Labels from resource a overwrite labels from resource b.
- for k, v := range a.Labels {
- res.Labels[k] = v
- }
- return res
-}
-
-// Detector attempts to detect resource information.
-// If the detector cannot find resource information, the returned resource is nil but no
-// error is returned.
-// An error is only returned on unexpected failures.
-type Detector func(context.Context) (*Resource, error)
-
-// MultiDetector returns a Detector that calls all input detectors in order and
-// merges each result with the previous one. In case a type or label key is
-// already set, the first value set takes precedence.
-// It returns on the first error that a sub-detector encounters.
-func MultiDetector(detectors ...Detector) Detector {
- return func(ctx context.Context) (*Resource, error) {
- return detectAll(ctx, detectors...)
- }
-}
-
-// detectAll calls all input detectors sequentially and merges each result with the previous one.
-// It returns on the first error that a sub-detector encounters.
-func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
- var res *Resource
- for _, d := range detectors {
- r, err := d(ctx)
- if err != nil {
- return nil, err
- }
- res = merge(res, r)
- }
- return res, nil
-}
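
A sketch of composing detectors; the static fallback detector is hypothetical. Because earlier detectors take precedence in merge, values from FromEnv win on collision:

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/resource"
)

func main() {
	static := func(context.Context) (*resource.Resource, error) {
		return &resource.Resource{
			Type:   "host",
			Labels: map[string]string{"zone": "us-east1-b"},
		}, nil
	}

	detect := resource.MultiDetector(resource.FromEnv, static)
	res, err := detect(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Type, res.Labels)
}
```
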
diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go
deleted file mode 100644
index 31477a464..000000000
--- a/vendor/go.opencensus.io/stats/doc.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-/*
-Package stats contains support for OpenCensus stats recording.
-
-OpenCensus allows users to create typed measures, record measurements,
-aggregate the collected data, and export the aggregated data.
-
-# Measures
-
-A measure represents a type of data point to be tracked and recorded.
-For example, latency, request Mb/s, and response Mb/s are measures
-to collect from a server.
-
-Measure constructors such as Int64 and Float64 automatically
-register the measure by the given name. Each registered measure needs
-to be unique by name. Measures also have a description and a unit.
-
-Libraries can define and export measures. Application authors can then
-create views and collect and break down measures by the tags they are
-interested in.
-
-# Recording measurements
-
-Measurement is a data point to be collected for a measure. For example,
-for a latency (ms) measure, 100 is a measurement that represents a 100ms
-latency event. Measurements are created from measures with
-the current context. Tags from the current context are recorded with the
-measurements if there are any.
-
-Recorded measurements are dropped immediately if no views are registered for them.
-There is usually no need to conditionally enable and disable
-recording to reduce cost. Recording of measurements is cheap.
-
-Libraries can always record measurements, and applications can later decide
-on which measurements they want to collect by registering views. This allows
-libraries to turn on the instrumentation by default.
-
-# Exemplars
-
-For a given recorded measurement, the associated exemplar is a diagnostic map
-that gives more information about the measurement.
-
-When aggregated using a Distribution aggregation, an exemplar is kept for each
-bucket in the Distribution. This allows you to easily find an example of a
-measurement that fell into each bucket.
-
-For example, if you also use the OpenCensus trace package and you
-record a measurement with a context that contains a sampled trace span,
-then the trace span will be added to the exemplar associated with the measurement.
-
-When exported to a supporting back end, you should be able to easily navigate
-to example traces that fell into each bucket in the Distribution.
-*/
-package stats // import "go.opencensus.io/stats"
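
A compact sketch of the lifecycle described above: define a measure once, then record measurements against a context. The measure name is a hypothetical example:

```go
package server

import (
	"context"
	"time"

	"go.opencensus.io/stats"
)

var latencyMs = stats.Float64(
	"example.com/measures/request_latency",
	"end-to-end request latency",
	stats.UnitMilliseconds,
)

func handle(ctx context.Context) {
	start := time.Now()
	// ... handle the request ...

	// Recording is cheap; the measurement is dropped unless a view over
	// latencyMs has been registered.
	stats.Record(ctx, latencyMs.M(float64(time.Since(start))/float64(time.Millisecond)))
}
```
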
diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go
deleted file mode 100644
index 436dc791f..000000000
--- a/vendor/go.opencensus.io/stats/internal/record.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "go.opencensus.io/tag"
-)
-
-// DefaultRecorder will be called for each Record call.
-var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
-
-// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but
-// avoids interface{} conversion.
-// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type,
-// but is interface{} here to avoid import loops
-var MeasurementRecorder interface{}
-
-// SubscriptionReporter reports when a view subscribed with a measure.
-var SubscriptionReporter func(measure string)
diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go
deleted file mode 100644
index 1ffd3cefc..000000000
--- a/vendor/go.opencensus.io/stats/measure.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// Measure represents a single numeric value to be tracked and recorded.
-// For example, latency, request bytes, and response bytes could be measures
-// to collect from a server.
-//
-// Measures by themselves have no outside effects. In order to be exported,
-// the measure needs to be used in a View. If no Views are defined over a
-// measure, there is very little cost in recording it.
-type Measure interface {
- // Name returns the name of this measure.
- //
- // Measure names are globally unique (among all libraries linked into your program).
- // We recommend prefixing the measure name with a domain name relevant to your
- // project or application.
- //
- // Measure names are never sent over the wire or exported to backends.
- // They are only used to create Views.
- Name() string
-
- // Description returns the human-readable description of this measure.
- Description() string
-
- // Unit returns the units for the values this measure takes on.
- //
- // Units are encoded according to the case-sensitive abbreviations from the
- // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
- Unit() string
-}
-
-// measureDescriptor is the untyped descriptor associated with each measure.
-// Int64Measure and Float64Measure wrap measureDescriptor to provide typed
-// recording APIs.
-// Two Measures with the same name will have the same measureDescriptor.
-type measureDescriptor struct {
- subs int32 // access atomically
-
- name string
- description string
- unit string
-}
-
-func (m *measureDescriptor) subscribe() {
- atomic.StoreInt32(&m.subs, 1)
-}
-
-func (m *measureDescriptor) subscribed() bool {
- return atomic.LoadInt32(&m.subs) == 1
-}
-
-var (
- mu sync.RWMutex
- measures = make(map[string]*measureDescriptor)
-)
-
-func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
- mu.Lock()
- defer mu.Unlock()
-
- if stored, ok := measures[name]; ok {
- return stored
- }
- m := &measureDescriptor{
- name: name,
- description: desc,
- unit: unit,
- }
- measures[name] = m
- return m
-}
-
-// Measurement is the numeric value measured when recording stats. Each measure
-// provides methods to create measurements of their kind. For example, Int64Measure
-// provides M to convert an int64 into a measurement.
-type Measurement struct {
- v float64
- m Measure
- desc *measureDescriptor
-}
-
-// Value returns the value of the Measurement as a float64.
-func (m Measurement) Value() float64 {
- return m.v
-}
-
-// Measure returns the Measure from which this Measurement was created.
-func (m Measurement) Measure() Measure {
- return m.m
-}
diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go
deleted file mode 100644
index f02c1eda8..000000000
--- a/vendor/go.opencensus.io/stats/measure_float64.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Float64Measure is a measure for float64 values.
-type Float64Measure struct {
- desc *measureDescriptor
-}
-
-// M creates a new float64 measurement.
-// Use Record to record measurements.
-func (m *Float64Measure) M(v float64) Measurement {
- return Measurement{
- m: m,
- desc: m.desc,
- v: v,
- }
-}
-
-// Float64 creates a new measure for float64 values.
-//
-// See the documentation for interface Measure for more guidance on the
-// parameters of this function.
-func Float64(name, description, unit string) *Float64Measure {
- mi := registerMeasureHandle(name, description, unit)
- return &Float64Measure{mi}
-}
-
-// Name returns the name of the measure.
-func (m *Float64Measure) Name() string {
- return m.desc.name
-}
-
-// Description returns the description of the measure.
-func (m *Float64Measure) Description() string {
- return m.desc.description
-}
-
-// Unit returns the unit of the measure.
-func (m *Float64Measure) Unit() string {
- return m.desc.unit
-}
diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go
deleted file mode 100644
index d101d7973..000000000
--- a/vendor/go.opencensus.io/stats/measure_int64.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Int64Measure is a measure for int64 values.
-type Int64Measure struct {
- desc *measureDescriptor
-}
-
-// M creates a new int64 measurement.
-// Use Record to record measurements.
-func (m *Int64Measure) M(v int64) Measurement {
- return Measurement{
- m: m,
- desc: m.desc,
- v: float64(v),
- }
-}
-
-// Int64 creates a new measure for int64 values.
-//
-// See the documentation for interface Measure for more guidance on the
-// parameters of this function.
-func Int64(name, description, unit string) *Int64Measure {
- mi := registerMeasureHandle(name, description, unit)
- return &Int64Measure{mi}
-}
-
-// Name returns the name of the measure.
-func (m *Int64Measure) Name() string {
- return m.desc.name
-}
-
-// Description returns the description of the measure.
-func (m *Int64Measure) Description() string {
- return m.desc.description
-}
-
-// Unit returns the unit of the measure.
-func (m *Int64Measure) Unit() string {
- return m.desc.unit
-}
diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go
deleted file mode 100644
index 8b5b99803..000000000
--- a/vendor/go.opencensus.io/stats/record.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-import (
- "context"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-func init() {
- internal.SubscriptionReporter = func(measure string) {
- mu.Lock()
- measures[measure].subscribe()
- mu.Unlock()
- }
-}
-
-// Recorder provides an interface for exporting measurement information from
-// the static Record method by using the WithRecorder option.
-type Recorder interface {
- // Record records a set of measurements associated with the given tags and attachments.
- // The second argument is a `[]Measurement`.
- Record(*tag.Map, interface{}, map[string]interface{})
-}
-
-type recordOptions struct {
- attachments metricdata.Attachments
- mutators []tag.Mutator
- measurements []Measurement
- recorder Recorder
-}
-
-// WithAttachments applies provided exemplar attachments.
-func WithAttachments(attachments metricdata.Attachments) Options {
- return func(ro *recordOptions) {
- ro.attachments = attachments
- }
-}
-
-// WithTags applies provided tag mutators.
-func WithTags(mutators ...tag.Mutator) Options {
- return func(ro *recordOptions) {
- ro.mutators = mutators
- }
-}
-
-// WithMeasurements applies provided measurements.
-func WithMeasurements(measurements ...Measurement) Options {
- return func(ro *recordOptions) {
- ro.measurements = measurements
- }
-}
-
-// WithRecorder records the measurements to the specified `Recorder`, rather
-// than to the global metrics recorder.
-func WithRecorder(meter Recorder) Options {
- return func(ro *recordOptions) {
- ro.recorder = meter
- }
-}
-
-// Options apply changes to recordOptions.
-type Options func(*recordOptions)
-
-func createRecordOption(ros ...Options) *recordOptions {
- o := &recordOptions{}
- for _, ro := range ros {
- ro(o)
- }
- return o
-}
-
-type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{})
-
-// Record records one or multiple measurements with the same context at once.
-// If there are any tags in the context, measurements will be tagged with them.
-func Record(ctx context.Context, ms ...Measurement) {
- // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality
- // (RecordOptions) we can reduce some allocations to speed up this hot path
- if len(ms) == 0 {
- return
- }
- recorder := internal.MeasurementRecorder.(measurementRecorder)
- record := false
- for _, m := range ms {
- if m.desc.subscribed() {
- record = true
- break
- }
- }
- if !record {
- return
- }
- recorder(tag.FromContext(ctx), ms, nil)
-}
-
-// RecordWithTags records one or multiple measurements at once.
-//
-// Measurements will be tagged with the tags in the context mutated by the mutators.
-// RecordWithTags is useful if you want to record with tag mutations but don't want
-// to propagate the mutations in the context.
-func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error {
- return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...))
-}
-
-// RecordWithOptions records measurements from the given options (if any) against context
-// and tags and attachments in the options (if any).
-// If there are any tags in the context, measurements will be tagged with them.
-func RecordWithOptions(ctx context.Context, ros ...Options) error {
- o := createRecordOption(ros...)
- if len(o.measurements) == 0 {
- return nil
- }
- recorder := internal.DefaultRecorder
- if o.recorder != nil {
- recorder = o.recorder.Record
- }
- if recorder == nil {
- return nil
- }
- record := false
- for _, m := range o.measurements {
- if m.desc.subscribed() {
- record = true
- break
- }
- }
- if !record {
- return nil
- }
- if len(o.mutators) > 0 {
- var err error
- if ctx, err = tag.New(ctx, o.mutators...); err != nil {
- return err
- }
- }
- recorder(tag.FromContext(ctx), o.measurements, o.attachments)
- return nil
-}
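
A sketch tying the options together; keyMethod and latencyMs are hypothetical names. The tag mutation applies only to this recording and does not alter the caller's context:

```go
package server

import (
	"context"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var (
	keyMethod = tag.MustNewKey("method")
	latencyMs = stats.Float64("example.com/measures/latency", "latency", stats.UnitMilliseconds)
)

func recordLatency(ctx context.Context, ms float64) error {
	return stats.RecordWithOptions(ctx,
		stats.WithTags(tag.Upsert(keyMethod, "GET")),
		stats.WithMeasurements(latencyMs.M(ms)),
	)
}
```
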
diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go
deleted file mode 100644
index 736399652..000000000
--- a/vendor/go.opencensus.io/stats/units.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Units are encoded according to the case-sensitive abbreviations from the
-// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
-const (
- UnitNone = "1" // Deprecated: Use UnitDimensionless.
- UnitDimensionless = "1"
- UnitBytes = "By"
- UnitMilliseconds = "ms"
- UnitSeconds = "s"
-)
diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go
deleted file mode 100644
index 61f72d20d..000000000
--- a/vendor/go.opencensus.io/stats/view/aggregation.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import "time"
-
-// AggType represents the type of aggregation function used on a View.
-type AggType int
-
-// All available aggregation types.
-const (
- AggTypeNone AggType = iota // no aggregation; reserved for future use.
- AggTypeCount // the count aggregation, see Count.
- AggTypeSum // the sum aggregation, see Sum.
- AggTypeDistribution // the distribution aggregation, see Distribution.
- AggTypeLastValue // the last value aggregation, see LastValue.
-)
-
-func (t AggType) String() string {
- return aggTypeName[t]
-}
-
-var aggTypeName = map[AggType]string{
- AggTypeNone: "None",
- AggTypeCount: "Count",
- AggTypeSum: "Sum",
- AggTypeDistribution: "Distribution",
- AggTypeLastValue: "LastValue",
-}
-
-// Aggregation represents a data aggregation method. Use one of the functions:
-// Count, Sum, Distribution, or LastValue to construct an Aggregation.
-type Aggregation struct {
- Type AggType // Type is the AggType of this Aggregation.
- Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
-
- newData func(time.Time) AggregationData
-}
-
-var (
- aggCount = &Aggregation{
- Type: AggTypeCount,
- newData: func(t time.Time) AggregationData {
- return &CountData{Start: t}
- },
- }
- aggSum = &Aggregation{
- Type: AggTypeSum,
- newData: func(t time.Time) AggregationData {
- return &SumData{Start: t}
- },
- }
-)
-
-// Count indicates that data collected and aggregated
-// with this method will be turned into a count value.
-// For example, total number of accepted requests can be
-// aggregated by using Count.
-func Count() *Aggregation {
- return aggCount
-}
-
-// Sum indicates that data collected and aggregated
-// with this method will be summed up.
-// For example, accumulated request bytes can be aggregated by using
-// Sum.
-func Sum() *Aggregation {
- return aggSum
-}
-
-// Distribution indicates that the desired aggregation is
-// a histogram distribution.
-//
-// A distribution aggregation may contain a histogram of the values in the
-// population. The bucket boundaries for that histogram are described
-// by the bounds. This defines len(bounds)+1 buckets.
-//
-// If len(bounds) >= 2 then the boundaries for bucket index i are:
-//
-// [-infinity, bounds[i]) for i = 0
-// [bounds[i-1], bounds[i]) for 0 < i < length
-// [bounds[i-1], +infinity) for i = length
-//
-// If len(bounds) is 0 then there is no histogram associated with the
-// distribution. There will be a single bucket with boundaries
-// (-infinity, +infinity).
-//
-// If len(bounds) is 1 then there are no finite buckets, and that single
-// element is the common boundary of the overflow and underflow buckets.
-func Distribution(bounds ...float64) *Aggregation {
- agg := &Aggregation{
- Type: AggTypeDistribution,
- Buckets: bounds,
- }
- agg.newData = func(t time.Time) AggregationData {
- return newDistributionData(agg, t)
- }
- return agg
-}
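
A quick illustration of the bucketing rules spelled out above, assuming the go.opencensus.io/stats/view import path:

```go
package main

import (
	"fmt"

	"go.opencensus.io/stats/view"
)

func main() {
	// Two bounds define three buckets: [-inf, 5), [5, 10), [10, +inf).
	agg := view.Distribution(5, 10)
	fmt.Println(agg.Type, agg.Buckets) // Distribution [5 10]
}
```
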
-
-// LastValue only reports the last value recorded using this
-// aggregation. All other measurements will be dropped.
-func LastValue() *Aggregation {
- return &Aggregation{
- Type: AggTypeLastValue,
- newData: func(_ time.Time) AggregationData {
- return &LastValueData{}
- },
- }
-}
diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go
deleted file mode 100644
index d93b52066..000000000
--- a/vendor/go.opencensus.io/stats/view/aggregation_data.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "math"
- "time"
-
- "go.opencensus.io/metric/metricdata"
-)
-
-// AggregationData represents an aggregated value from a collection.
-// It is reported on the view data during exporting.
-// Most users won't directly access aggregation data.
-type AggregationData interface {
- isAggregationData() bool
- addSample(v float64, attachments map[string]interface{}, t time.Time)
- clone() AggregationData
- equal(other AggregationData) bool
- toPoint(t metricdata.Type, time time.Time) metricdata.Point
- StartTime() time.Time
-}
-
-const epsilon = 1e-9
-
-// CountData is the aggregated data for the Count aggregation.
-// A count aggregation processes data and counts the recordings.
-//
-// Most users won't directly access count data.
-type CountData struct {
- Start time.Time
- Value int64
-}
-
-func (a *CountData) isAggregationData() bool { return true }
-
-func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) {
- a.Value = a.Value + 1
-}
-
-func (a *CountData) clone() AggregationData {
- return &CountData{Value: a.Value, Start: a.Start}
-}
-
-func (a *CountData) equal(other AggregationData) bool {
- a2, ok := other.(*CountData)
- if !ok {
- return false
- }
-
- return a.Start.Equal(a2.Start) && a.Value == a2.Value
-}
-
-func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeCumulativeInt64:
- return metricdata.NewInt64Point(t, a.Value)
- default:
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns the start time of the data being aggregated by CountData.
-func (a *CountData) StartTime() time.Time {
- return a.Start
-}
-
-// SumData is the aggregated data for the Sum aggregation.
-// A sum aggregation processes data and sums up the recordings.
-//
-// Most users won't directly access sum data.
-type SumData struct {
- Start time.Time
- Value float64
-}
-
-func (a *SumData) isAggregationData() bool { return true }
-
-func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
- a.Value += v
-}
-
-func (a *SumData) clone() AggregationData {
- return &SumData{Value: a.Value, Start: a.Start}
-}
-
-func (a *SumData) equal(other AggregationData) bool {
- a2, ok := other.(*SumData)
- if !ok {
- return false
- }
- return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon
-}
-
-func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeCumulativeInt64:
- return metricdata.NewInt64Point(t, int64(a.Value))
- case metricdata.TypeCumulativeFloat64:
- return metricdata.NewFloat64Point(t, a.Value)
- default:
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns the start time of the data being aggregated by SumData.
-func (a *SumData) StartTime() time.Time {
- return a.Start
-}
-
-// DistributionData is the aggregated data for the
-// Distribution aggregation.
-//
-// Most users won't directly access distribution data.
-//
-// For a distribution with N bounds, the associated DistributionData will have
-// N+1 buckets.
-type DistributionData struct {
- Count int64 // number of data points aggregated
- Min float64 // minimum value in the distribution
- Max float64 // max value in the distribution
- Mean float64 // mean of the distribution
- SumOfSquaredDev float64 // sum of the squared deviation from the mean
- CountPerBucket []int64 // number of occurrences per bucket
-	// ExemplarsPerBucket is a slice of the same length as CountPerBucket,
-	// containing an exemplar for the associated bucket, or nil.
- ExemplarsPerBucket []*metricdata.Exemplar
- bounds []float64 // histogram distribution of the values
- Start time.Time
-}
-
-func newDistributionData(agg *Aggregation, t time.Time) *DistributionData {
- bucketCount := len(agg.Buckets) + 1
- return &DistributionData{
- CountPerBucket: make([]int64, bucketCount),
- ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
- bounds: agg.Buckets,
- Min: math.MaxFloat64,
- Max: math.SmallestNonzeroFloat64,
- Start: t,
- }
-}
-
-// Sum returns the sum of all samples collected.
-func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) }
-
-func (a *DistributionData) variance() float64 {
- if a.Count <= 1 {
- return 0
- }
- return a.SumOfSquaredDev / float64(a.Count-1)
-}
-
-func (a *DistributionData) isAggregationData() bool { return true }
-
-// TODO(songy23): support exemplar attachments.
-func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) {
- if v < a.Min {
- a.Min = v
- }
- if v > a.Max {
- a.Max = v
- }
- a.Count++
- a.addToBucket(v, attachments, t)
-
- if a.Count == 1 {
- a.Mean = v
- return
- }
-
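-	// Welford's online algorithm: update the running mean and the sum of
-	// squared deviations incrementally, without storing individual samples.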
- oldMean := a.Mean
- a.Mean = a.Mean + (v-a.Mean)/float64(a.Count)
- a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean)
-}
-
-func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) {
- var count *int64
- var i int
- var b float64
- for i, b = range a.bounds {
- if v < b {
- count = &a.CountPerBucket[i]
- break
- }
- }
- if count == nil { // Last bucket.
- i = len(a.bounds)
- count = &a.CountPerBucket[i]
- }
- *count++
- if exemplar := getExemplar(v, attachments, t); exemplar != nil {
- a.ExemplarsPerBucket[i] = exemplar
- }
-}
-
-func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar {
- if len(attachments) == 0 {
- return nil
- }
- return &metricdata.Exemplar{
- Value: v,
- Timestamp: t,
- Attachments: attachments,
- }
-}
-
-func (a *DistributionData) clone() AggregationData {
- c := *a
- c.CountPerBucket = append([]int64(nil), a.CountPerBucket...)
- c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...)
- return &c
-}
-
-func (a *DistributionData) equal(other AggregationData) bool {
- a2, ok := other.(*DistributionData)
- if !ok {
- return false
- }
- if a2 == nil {
- return false
- }
- if len(a.CountPerBucket) != len(a2.CountPerBucket) {
- return false
- }
- for i := range a.CountPerBucket {
- if a.CountPerBucket[i] != a2.CountPerBucket[i] {
- return false
- }
- }
- return a.Start.Equal(a2.Start) &&
- a.Count == a2.Count &&
- a.Min == a2.Min &&
- a.Max == a2.Max &&
- math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
-}
-
-func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeCumulativeDistribution:
- buckets := []metricdata.Bucket{}
- for i := 0; i < len(a.CountPerBucket); i++ {
- buckets = append(buckets, metricdata.Bucket{
- Count: a.CountPerBucket[i],
- Exemplar: a.ExemplarsPerBucket[i],
- })
- }
- bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds}
-
- val := &metricdata.Distribution{
- Count: a.Count,
- Sum: a.Sum(),
- SumOfSquaredDeviation: a.SumOfSquaredDev,
- BucketOptions: bucketOptions,
- Buckets: buckets,
- }
- return metricdata.NewDistributionPoint(t, val)
-
- default:
-		// TODO(rghetia): add support when there is a use case for TypeGaugeDistribution.
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns the start time of the data being aggregated by DistributionData.
-func (a *DistributionData) StartTime() time.Time {
- return a.Start
-}
-
-// LastValueData contains the last value recorded for a LastValue aggregation.
-type LastValueData struct {
- Value float64
-}
-
-func (l *LastValueData) isAggregationData() bool {
- return true
-}
-
-func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
- l.Value = v
-}
-
-func (l *LastValueData) clone() AggregationData {
- return &LastValueData{l.Value}
-}
-
-func (l *LastValueData) equal(other AggregationData) bool {
- a2, ok := other.(*LastValueData)
- if !ok {
- return false
- }
- return l.Value == a2.Value
-}
-
-func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeGaugeInt64:
- return metricdata.NewInt64Point(t, int64(l.Value))
- case metricdata.TypeGaugeFloat64:
- return metricdata.NewFloat64Point(t, l.Value)
- default:
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns an empty time value as start time is not recorded when using last value
-// aggregation.
-func (l *LastValueData) StartTime() time.Time {
- return time.Time{}
-}
-
-// ClearStart clears the Start field from data if present. Useful for testing in cases where the
-// start time will be nondeterministic.
-func ClearStart(data AggregationData) {
- switch data := data.(type) {
- case *CountData:
- data.Start = time.Time{}
- case *SumData:
- data.Start = time.Time{}
- case *DistributionData:
- data.Start = time.Time{}
- }
-}
diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go
deleted file mode 100644
index bcd6e08c7..000000000
--- a/vendor/go.opencensus.io/stats/view/collector.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "sort"
- "time"
-
- "go.opencensus.io/internal/tagencoding"
- "go.opencensus.io/tag"
-)
-
-type collector struct {
-	// signatures maps each unique tag signature (the encoded values for all
-	// keys) to its aggregation data.
- signatures map[string]AggregationData
- // Aggregation is the description of the aggregation to perform for this
- // view.
- a *Aggregation
-}
-
-func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
- aggregator, ok := c.signatures[s]
- if !ok {
- aggregator = c.a.newData(t)
- c.signatures[s] = aggregator
- }
- aggregator.addSample(v, attachments, t)
-}
-
-// collectedRows returns a snapshot of the collected Row values.
-func (c *collector) collectedRows(keys []tag.Key) []*Row {
- rows := make([]*Row, 0, len(c.signatures))
- for sig, aggregator := range c.signatures {
- tags := decodeTags([]byte(sig), keys)
- row := &Row{Tags: tags, Data: aggregator.clone()}
- rows = append(rows, row)
- }
- return rows
-}
-
-func (c *collector) clearRows() {
- c.signatures = make(map[string]AggregationData)
-}
-
-// encodeWithKeys encodes the map using only the values associated with
-// the keys provided.
-func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
- // Compute the buffer length we will need ahead of time to avoid resizing later
- reqLen := 0
- for _, k := range keys {
- s, _ := m.Value(k)
-		// We will store each value + its length
- reqLen += len(s) + 1
- }
- vb := &tagencoding.Values{
- Buffer: make([]byte, reqLen),
- }
- for _, k := range keys {
- v, _ := m.Value(k)
- vb.WriteValue([]byte(v))
- }
- return vb.Bytes()
-}
-
-// decodeTags decodes tags from the buffer and
-// orders them by the keys.
-func decodeTags(buf []byte, keys []tag.Key) []tag.Tag {
- vb := &tagencoding.Values{Buffer: buf}
- var tags []tag.Tag
- for _, k := range keys {
- v := vb.ReadValue()
- if v != nil {
- tags = append(tags, tag.Tag{Key: k, Value: string(v)})
- }
- }
- vb.ReadIndex = 0
- sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() })
- return tags
-}
diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go
deleted file mode 100644
index 60bf0e392..000000000
--- a/vendor/go.opencensus.io/stats/view/doc.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Package view contains support for collecting and exposing aggregates over stats.
-//
-// In order to collect measurements, views need to be defined and registered.
-// A view allows recorded measurements to be filtered and aggregated.
-//
-// All recorded measurements can be grouped by a list of tags.
-//
-// OpenCensus provides several aggregation methods: Count, Distribution, Sum
-// and LastValue.
-//
-// Count only counts the number of measurement points recorded.
-// Distribution provides a statistical summary of the aggregated data by counting
-// how many recorded measurements fall into each bucket.
-// Sum adds up the measurement values.
-// LastValue just keeps track of the most recently recorded measurement value.
-// All aggregations are cumulative.
-//
-// Views can be registered and unregistered at any time during program execution.
-//
-// Libraries can define views but it is recommended that in most cases registering
-// views be left up to applications.
-//
-// # Exporting
-//
-// Collected and aggregated data can be exported to a metric collection
-// backend by registering its exporter.
-//
-// Multiple exporters can be registered to upload the data to various
-// different back ends.
-package view // import "go.opencensus.io/stats/view"
-
-// TODO(acetechnologist): Add a link to the language independent OpenCensus
-// spec when it is available.
diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go
deleted file mode 100644
index 73ba11f5b..000000000
--- a/vendor/go.opencensus.io/stats/view/export.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package view
-
-// Exporter exports the collected records as view data.
-//
-// The ExportView method should return quickly; if an
-// Exporter takes a significant amount of time to
-// process a Data, that work should be done on another goroutine.
-//
-// It is safe to assume that ExportView will not be called concurrently from
-// multiple goroutines.
-//
-// The Data should not be modified.
-type Exporter interface {
- ExportView(viewData *Data)
-}
-
-// RegisterExporter registers an exporter.
-// Collected data will be reported via all the
-// registered exporters. Once you no longer
-// want data to be exported, invoke UnregisterExporter
-// with the previously registered exporter.
-//
-// Binaries can register exporters, libraries shouldn't register exporters.
-func RegisterExporter(e Exporter) {
- defaultWorker.RegisterExporter(e)
-}
-
-// UnregisterExporter unregisters an exporter.
-func UnregisterExporter(e Exporter) {
- defaultWorker.UnregisterExporter(e)
-}
diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go
deleted file mode 100644
index 293b54ecb..000000000
--- a/vendor/go.opencensus.io/stats/view/view.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "bytes"
- "errors"
- "fmt"
- "reflect"
- "sort"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
-)
-
-// View allows users to aggregate the recorded stats.Measurements.
-// Views need to be passed to the Register function before data will be
-// collected and sent to Exporters.
-type View struct {
- Name string // Name of View. Must be unique. If unset, will default to the name of the Measure.
- Description string // Description is a human-readable description for this view.
-
- // TagKeys are the tag keys describing the grouping of this view.
- // A single Row will be produced for each combination of associated tag values.
- TagKeys []tag.Key
-
- // Measure is a stats.Measure to aggregate in this view.
- Measure stats.Measure
-
- // Aggregation is the aggregation function to apply to the set of Measurements.
- Aggregation *Aggregation
-}
-
-// WithName returns a copy of the View with a new name. This is useful for
-// renaming views to cope with limitations placed on metric names by various
-// backends.
-func (v *View) WithName(name string) *View {
- vNew := *v
- vNew.Name = name
- return &vNew
-}
-
-// same compares two views and returns true if they represent the same aggregation.
-func (v *View) same(other *View) bool {
- if v == other {
- return true
- }
- if v == nil {
- return false
- }
- return reflect.DeepEqual(v.Aggregation, other.Aggregation) &&
- v.Measure.Name() == other.Measure.Name()
-}
-
-// ErrNegativeBucketBounds is the error returned if a histogram contains negative bucket bounds.
-//
-// Deprecated: this should not be public.
-var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported")
-
-// canonicalize canonicalizes v by setting explicit
-// defaults for Name and Description and sorting the TagKeys.
-func (v *View) canonicalize() error {
- if v.Measure == nil {
- return fmt.Errorf("cannot register view %q: measure not set", v.Name)
- }
- if v.Aggregation == nil {
- return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
- }
- if v.Name == "" {
- v.Name = v.Measure.Name()
- }
- if v.Description == "" {
- v.Description = v.Measure.Description()
- }
- if err := checkViewName(v.Name); err != nil {
- return err
- }
- sort.Slice(v.TagKeys, func(i, j int) bool {
- return v.TagKeys[i].Name() < v.TagKeys[j].Name()
- })
- sort.Float64s(v.Aggregation.Buckets)
- for _, b := range v.Aggregation.Buckets {
- if b < 0 {
- return ErrNegativeBucketBounds
- }
- }
- // drop 0 bucket silently.
- v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...)
-
- return nil
-}
-
-func dropZeroBounds(bounds ...float64) []float64 {
- for i, bound := range bounds {
- if bound > 0 {
- return bounds[i:]
- }
- }
- return []float64{}
-}
-
-// viewInternal is the internal representation of a View.
-type viewInternal struct {
- view *View // view is the canonicalized View definition associated with this view.
-	subscribed uint32 // 1 if someone is subscribed and data needs to be exported; accessed atomically
- collector *collector
- metricDescriptor *metricdata.Descriptor
-}
-
-func newViewInternal(v *View) (*viewInternal, error) {
- return &viewInternal{
- view: v,
- collector: &collector{make(map[string]AggregationData), v.Aggregation},
- metricDescriptor: viewToMetricDescriptor(v),
- }, nil
-}
-
-func (v *viewInternal) subscribe() {
- atomic.StoreUint32(&v.subscribed, 1)
-}
-
-func (v *viewInternal) unsubscribe() {
- atomic.StoreUint32(&v.subscribed, 0)
-}
-
-// isSubscribed returns true if the view is exporting
-// data by subscription.
-func (v *viewInternal) isSubscribed() bool {
- return atomic.LoadUint32(&v.subscribed) == 1
-}
-
-func (v *viewInternal) clearRows() {
- v.collector.clearRows()
-}
-
-func (v *viewInternal) collectedRows() []*Row {
- return v.collector.collectedRows(v.view.TagKeys)
-}
-
-func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) {
- if !v.isSubscribed() {
- return
- }
- sig := string(encodeWithKeys(m, v.view.TagKeys))
- v.collector.addSample(sig, val, attachments, t)
-}
-
-// A Data is a set of rows about usage of the single measure associated
-// with the given view. Each row is specific to a unique set of tags.
-type Data struct {
- View *View
- Start, End time.Time
- Rows []*Row
-}
-
-// Row is the collected value for a specific set of key-value pairs, a.k.a. tags.
-type Row struct {
- Tags []tag.Tag
- Data AggregationData
-}
-
-func (r *Row) String() string {
- var buffer bytes.Buffer
- buffer.WriteString("{ ")
- buffer.WriteString("{ ")
- for _, t := range r.Tags {
- buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value))
- }
- buffer.WriteString(" }")
- buffer.WriteString(fmt.Sprintf("%v", r.Data))
- buffer.WriteString(" }")
- return buffer.String()
-}
-
-// Equal returns true if both rows are equal. Tags are expected to be ordered
-// by the key name. If both rows have the same tags but the tags appear in
-// different orders, it will return false.
-func (r *Row) Equal(other *Row) bool {
- if r == other {
- return true
- }
- return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
-}
-
-const maxNameLength = 255
-
-// isPrintable returns true if the given string contains only printable characters.
-func isPrintable(str string) bool {
- for _, r := range str {
- if !(r >= ' ' && r <= '~') {
- return false
- }
- }
- return true
-}
-
-func checkViewName(name string) error {
- if len(name) > maxNameLength {
- return fmt.Errorf("view name cannot be larger than %v", maxNameLength)
- }
- if !isPrintable(name) {
- return fmt.Errorf("view name needs to be an ASCII string")
- }
- return nil
-}
diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go
deleted file mode 100644
index 57d615ec7..000000000
--- a/vendor/go.opencensus.io/stats/view/view_to_metric.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "time"
-
- "go.opencensus.io/resource"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/stats"
-)
-
-func getUnit(unit string) metricdata.Unit {
- switch unit {
- case "1":
- return metricdata.UnitDimensionless
- case "ms":
- return metricdata.UnitMilliseconds
- case "By":
- return metricdata.UnitBytes
- }
- return metricdata.UnitDimensionless
-}
-
-func getType(v *View) metricdata.Type {
- m := v.Measure
- agg := v.Aggregation
-
- switch agg.Type {
- case AggTypeSum:
- switch m.(type) {
- case *stats.Int64Measure:
- return metricdata.TypeCumulativeInt64
- case *stats.Float64Measure:
- return metricdata.TypeCumulativeFloat64
- default:
- panic("unexpected measure type")
- }
- case AggTypeDistribution:
- return metricdata.TypeCumulativeDistribution
- case AggTypeLastValue:
- switch m.(type) {
- case *stats.Int64Measure:
- return metricdata.TypeGaugeInt64
- case *stats.Float64Measure:
- return metricdata.TypeGaugeFloat64
- default:
- panic("unexpected measure type")
- }
- case AggTypeCount:
- switch m.(type) {
- case *stats.Int64Measure:
- return metricdata.TypeCumulativeInt64
- case *stats.Float64Measure:
- return metricdata.TypeCumulativeInt64
- default:
- panic("unexpected measure type")
- }
- default:
- panic("unexpected aggregation type")
- }
-}
-
-func getLabelKeys(v *View) []metricdata.LabelKey {
- labelKeys := []metricdata.LabelKey{}
- for _, k := range v.TagKeys {
- labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()})
- }
- return labelKeys
-}
-
-func viewToMetricDescriptor(v *View) *metricdata.Descriptor {
- return &metricdata.Descriptor{
- Name: v.Name,
- Description: v.Description,
- Unit: convertUnit(v),
- Type: getType(v),
- LabelKeys: getLabelKeys(v),
- }
-}
-
-func convertUnit(v *View) metricdata.Unit {
- switch v.Aggregation.Type {
- case AggTypeCount:
- return metricdata.UnitDimensionless
- default:
- return getUnit(v.Measure.Unit())
- }
-}
-
-func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue {
- labelValues := []metricdata.LabelValue{}
- tagMap := make(map[string]string)
- for _, tag := range row.Tags {
- tagMap[tag.Key.Name()] = tag.Value
- }
-
- for _, key := range expectedKeys {
- if val, ok := tagMap[key.Key]; ok {
- labelValues = append(labelValues, metricdata.NewLabelValue(val))
- } else {
- labelValues = append(labelValues, metricdata.LabelValue{})
- }
- }
- return labelValues
-}
-
-func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries {
- return &metricdata.TimeSeries{
- Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)},
- LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys),
- StartTime: row.Data.StartTime(),
- }
-}
-
-func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric {
- rows := v.collectedRows()
- if len(rows) == 0 {
- return nil
- }
-
- ts := []*metricdata.TimeSeries{}
- for _, row := range rows {
- ts = append(ts, rowToTimeseries(v, row, now))
- }
-
- m := &metricdata.Metric{
- Descriptor: *v.metricDescriptor,
- TimeSeries: ts,
- Resource: r,
- }
- return m
-}
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go
deleted file mode 100644
index 6a79cd8a3..000000000
--- a/vendor/go.opencensus.io/stats/view/worker.go
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "fmt"
- "sync"
- "time"
-
- "go.opencensus.io/resource"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/metric/metricproducer"
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-func init() {
- defaultWorker = NewMeter().(*worker)
- go defaultWorker.start()
- internal.DefaultRecorder = record
- internal.MeasurementRecorder = recordMeasurement
-}
-
-type measureRef struct {
- measure string
- views map[*viewInternal]struct{}
-}
-
-type worker struct {
- measures map[string]*measureRef
- views map[string]*viewInternal
- viewStartTimes map[*viewInternal]time.Time
-
- timer *time.Ticker
- c chan command
- quit, done chan bool
- mu sync.RWMutex
- r *resource.Resource
-
- exportersMu sync.RWMutex
- exporters map[Exporter]struct{}
-}
-
-// Meter defines an interface which allows a single process to maintain
-// multiple sets of metrics exports (intended for the advanced case where a
-// single process wants to report metrics about multiple objects, such as
-// multiple databases or HTTP services).
-//
-// Note that this is an advanced use case, and the static functions in this
-// module should cover the common use cases.
-type Meter interface {
- stats.Recorder
- // Find returns a registered view associated with this name.
- // If no registered view is found, nil is returned.
- Find(name string) *View
- // Register begins collecting data for the given views.
- // Once a view is registered, it reports data to the registered exporters.
- Register(views ...*View) error
-	// Unregister the given views. Data will no longer be exported for these views
- // after Unregister returns.
- // It is not necessary to unregister from views you expect to collect for the
- // duration of your program execution.
- Unregister(views ...*View)
- // SetReportingPeriod sets the interval between reporting aggregated views in
- // the program. If duration is less than or equal to zero, it enables the
- // default behavior.
- //
- // Note: each exporter makes different promises about what the lowest supported
- // duration is. For example, the Stackdriver exporter recommends a value no
- // lower than 1 minute. Consult each exporter per your needs.
- SetReportingPeriod(time.Duration)
-
- // RegisterExporter registers an exporter.
- // Collected data will be reported via all the
- // registered exporters. Once you no longer
- // want data to be exported, invoke UnregisterExporter
- // with the previously registered exporter.
- //
- // Binaries can register exporters, libraries shouldn't register exporters.
- RegisterExporter(Exporter)
- // UnregisterExporter unregisters an exporter.
- UnregisterExporter(Exporter)
- // SetResource may be used to set the Resource associated with this registry.
- // This is intended to be used in cases where a single process exports metrics
- // for multiple Resources, typically in a multi-tenant situation.
- SetResource(*resource.Resource)
-
- // Start causes the Meter to start processing Record calls and aggregating
- // statistics as well as exporting data.
- Start()
- // Stop causes the Meter to stop processing calls and terminate data export.
- Stop()
-
-	// RetrieveData gets a snapshot of the data collected for the view registered
- // with the given name. It is intended for testing only.
- RetrieveData(viewName string) ([]*Row, error)
-}
-
-var _ Meter = (*worker)(nil)
-
-var defaultWorker *worker
-
-var defaultReportingDuration = 10 * time.Second
-
-// Find returns a registered view associated with this name.
-// If no registered view is found, nil is returned.
-func Find(name string) (v *View) {
- return defaultWorker.Find(name)
-}
-
-// Find returns a registered view associated with this name.
-// If no registered view is found, nil is returned.
-func (w *worker) Find(name string) (v *View) {
- req := &getViewByNameReq{
- name: name,
- c: make(chan *getViewByNameResp),
- }
- w.c <- req
- resp := <-req.c
- return resp.v
-}
-
-// Register begins collecting data for the given views.
-// Once a view is registered, it reports data to the registered exporters.
-func Register(views ...*View) error {
- return defaultWorker.Register(views...)
-}
-
-// Register begins collecting data for the given views.
-// Once a view is registered, it reports data to the registered exporters.
-func (w *worker) Register(views ...*View) error {
-	req := &registerViewReq{
- views: views,
- err: make(chan error),
- }
- w.c <- req
- return <-req.err
-}
-
-// Unregister the given views. Data will no longer be exported for these views
-// after Unregister returns.
-// It is not necessary to unregister from views you expect to collect for the
-// duration of your program execution.
-func Unregister(views ...*View) {
- defaultWorker.Unregister(views...)
-}
-
-// Unregister the given views. Data will no longer be exported for these views
-// after Unregister returns.
-// It is not necessary to unregister from views you expect to collect for the
-// duration of your program execution.
-func (w *worker) Unregister(views ...*View) {
- names := make([]string, len(views))
- for i := range views {
- names[i] = views[i].Name
- }
- req := &unregisterFromViewReq{
- views: names,
- done: make(chan struct{}),
- }
- w.c <- req
- <-req.done
-}
-
-// RetrieveData gets a snapshot of the data collected for the view registered
-// with the given name. It is intended for testing only.
-func RetrieveData(viewName string) ([]*Row, error) {
- return defaultWorker.RetrieveData(viewName)
-}
-
-// RetrieveData gets a snapshot of the data collected for the view registered
-// with the given name. It is intended for testing only.
-func (w *worker) RetrieveData(viewName string) ([]*Row, error) {
- req := &retrieveDataReq{
- now: time.Now(),
- v: viewName,
- c: make(chan *retrieveDataResp),
- }
- w.c <- req
- resp := <-req.c
- return resp.rows, resp.err
-}
-
-func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
- defaultWorker.Record(tags, ms, attachments)
-}
-
-func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
- defaultWorker.recordMeasurement(tags, ms, attachments)
-}
-
-// Record records a set of measurements ms associated with the given tags and attachments.
-func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
- w.recordMeasurement(tags, ms.([]stats.Measurement), attachments)
-}
-
-// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
-// This is the same as Record but without an interface{} type to avoid allocations
-func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
- req := &recordReq{
- tm: tags,
- ms: ms,
- attachments: attachments,
- t: time.Now(),
- }
- w.c <- req
-}
-
-// SetReportingPeriod sets the interval between reporting aggregated views in
-// the program. If duration is less than or equal to zero, it enables the
-// default behavior.
-//
-// Note: each exporter makes different promises about what the lowest supported
-// duration is. For example, the Stackdriver exporter recommends a value no
-// lower than 1 minute. Consult each exporter per your needs.
-func SetReportingPeriod(d time.Duration) {
- defaultWorker.SetReportingPeriod(d)
-}
-
-// Stop stops the default worker.
-func Stop() {
- defaultWorker.Stop()
-}
-
-// SetReportingPeriod sets the interval between reporting aggregated views in
-// the program. If duration is less than or equal to zero, it enables the
-// default behavior.
-//
-// Note: each exporter makes different promises about what the lowest supported
-// duration is. For example, the Stackdriver exporter recommends a value no
-// lower than 1 minute. Consult each exporter per your needs.
-func (w *worker) SetReportingPeriod(d time.Duration) {
- // TODO(acetechnologist): ensure that the duration d is more than a certain
- // value. e.g. 1s
- req := &setReportingPeriodReq{
- d: d,
- c: make(chan bool),
- }
- w.c <- req
- <-req.c // don't return until the timer is set to the new duration.
-}
-
-// NewMeter constructs a Meter instance. You should only need to use this if
-// you need to separate out Measurement recordings and View aggregations within
-// a single process.
-func NewMeter() Meter {
- return &worker{
- measures: make(map[string]*measureRef),
- views: make(map[string]*viewInternal),
- viewStartTimes: make(map[*viewInternal]time.Time),
- timer: time.NewTicker(defaultReportingDuration),
- c: make(chan command, 1024),
- quit: make(chan bool),
- done: make(chan bool),
-
- exporters: make(map[Exporter]struct{}),
- }
-}
-
-// SetResource associates all data collected by this Meter with the specified
-// resource. This resource is reported when using metricexport.ReadAndExport;
-// it is not provided when used with ExportView/RegisterExporter, because that
-// interface does not provide a means for reporting the Resource.
-func (w *worker) SetResource(r *resource.Resource) {
- w.r = r
-}
-
-func (w *worker) Start() {
- go w.start()
-}
-
-func (w *worker) start() {
- prodMgr := metricproducer.GlobalManager()
- prodMgr.AddProducer(w)
-
- for {
- select {
- case cmd := <-w.c:
- cmd.handleCommand(w)
- case <-w.timer.C:
- w.reportUsage()
- case <-w.quit:
- w.timer.Stop()
- close(w.c)
- close(w.done)
- return
- }
- }
-}
-
-func (w *worker) Stop() {
- prodMgr := metricproducer.GlobalManager()
- prodMgr.DeleteProducer(w)
- select {
- case <-w.quit:
- default:
- close(w.quit)
- }
- <-w.done
-}
-
-func (w *worker) getMeasureRef(name string) *measureRef {
- if mr, ok := w.measures[name]; ok {
- return mr
- }
- mr := &measureRef{
- measure: name,
- views: make(map[*viewInternal]struct{}),
- }
- w.measures[name] = mr
- return mr
-}
-
-func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
- w.mu.Lock()
- defer w.mu.Unlock()
- vi, err := newViewInternal(v)
- if err != nil {
- return nil, err
- }
- if x, ok := w.views[vi.view.Name]; ok {
- if !x.view.same(vi.view) {
- return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name)
- }
-
- // the view is already registered so there is nothing to do and the
- // command is considered successful.
- return x, nil
- }
- w.views[vi.view.Name] = vi
- w.viewStartTimes[vi] = time.Now()
- ref := w.getMeasureRef(vi.view.Measure.Name())
- ref.views[vi] = struct{}{}
- return vi, nil
-}
-
-func (w *worker) unregisterView(v *viewInternal) {
- w.mu.Lock()
- defer w.mu.Unlock()
- delete(w.views, v.view.Name)
- delete(w.viewStartTimes, v)
- if measure := w.measures[v.view.Measure.Name()]; measure != nil {
- delete(measure.views, v)
- }
-}
-
-func (w *worker) reportView(v *viewInternal) {
- if !v.isSubscribed() {
- return
- }
- rows := v.collectedRows()
- viewData := &Data{
- View: v.view,
- Start: w.viewStartTimes[v],
- End: time.Now(),
- Rows: rows,
- }
- w.exportersMu.Lock()
- defer w.exportersMu.Unlock()
- for e := range w.exporters {
- e.ExportView(viewData)
- }
-}
-
-func (w *worker) reportUsage() {
- w.mu.Lock()
- defer w.mu.Unlock()
- for _, v := range w.views {
- w.reportView(v)
- }
-}
-
-func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric {
- if !v.isSubscribed() {
- return nil
- }
-
- return viewToMetric(v, w.r, now)
-}
-
-// Read reads all view data and returns them as metrics.
-// It is typically invoked by a metric reader to export stats in metric format.
-func (w *worker) Read() []*metricdata.Metric {
- w.mu.Lock()
- defer w.mu.Unlock()
- now := time.Now()
- metrics := make([]*metricdata.Metric, 0, len(w.views))
- for _, v := range w.views {
- metric := w.toMetric(v, now)
- if metric != nil {
- metrics = append(metrics, metric)
- }
- }
- return metrics
-}
-
-func (w *worker) RegisterExporter(e Exporter) {
- w.exportersMu.Lock()
- defer w.exportersMu.Unlock()
-
- w.exporters[e] = struct{}{}
-}
-
-func (w *worker) UnregisterExporter(e Exporter) {
- w.exportersMu.Lock()
- defer w.exportersMu.Unlock()
-
- delete(w.exporters, e)
-}
diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go
deleted file mode 100644
index 9ac4cc059..000000000
--- a/vendor/go.opencensus.io/stats/view/worker_commands.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "errors"
- "fmt"
- "strings"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-type command interface {
- handleCommand(w *worker)
-}
-
-// getViewByNameReq is the command to get a view given its name.
-type getViewByNameReq struct {
- name string
- c chan *getViewByNameResp
-}
-
-type getViewByNameResp struct {
- v *View
-}
-
-func (cmd *getViewByNameReq) handleCommand(w *worker) {
- v := w.views[cmd.name]
- if v == nil {
- cmd.c <- &getViewByNameResp{nil}
- return
- }
- cmd.c <- &getViewByNameResp{v.view}
-}
-
-// registerViewReq is the command to register a view.
-type registerViewReq struct {
- views []*View
- err chan error
-}
-
-func (cmd *registerViewReq) handleCommand(w *worker) {
- for _, v := range cmd.views {
- if err := v.canonicalize(); err != nil {
- cmd.err <- err
- return
- }
- }
- var errstr []string
- for _, view := range cmd.views {
- vi, err := w.tryRegisterView(view)
- if err != nil {
- errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
- continue
- }
- internal.SubscriptionReporter(view.Measure.Name())
- vi.subscribe()
- }
- if len(errstr) > 0 {
- cmd.err <- errors.New(strings.Join(errstr, "\n"))
- } else {
- cmd.err <- nil
- }
-}
-
-// unregisterFromViewReq is the command to unregister from a view. It has no
-// impact on the data collection for clients that are pulling data from the
-// library.
-type unregisterFromViewReq struct {
- views []string
- done chan struct{}
-}
-
-func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
- for _, name := range cmd.views {
- vi, ok := w.views[name]
- if !ok {
- continue
- }
-
- // Report pending data for this view before removing it.
- w.reportView(vi)
-
- vi.unsubscribe()
- if !vi.isSubscribed() {
-			// this was the last subscription and the view is not collecting anymore.
- // The collected data can be cleared.
- vi.clearRows()
- }
- w.unregisterView(vi)
- }
- cmd.done <- struct{}{}
-}
-
-// retrieveDataReq is the command to retrieve data for a view.
-type retrieveDataReq struct {
- now time.Time
- v string
- c chan *retrieveDataResp
-}
-
-type retrieveDataResp struct {
- rows []*Row
- err error
-}
-
-func (cmd *retrieveDataReq) handleCommand(w *worker) {
- w.mu.Lock()
- defer w.mu.Unlock()
- vi, ok := w.views[cmd.v]
- if !ok {
- cmd.c <- &retrieveDataResp{
- nil,
- fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
- }
- return
- }
-
- if !vi.isSubscribed() {
- cmd.c <- &retrieveDataResp{
- nil,
- fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
- }
- return
- }
- cmd.c <- &retrieveDataResp{
- vi.collectedRows(),
- nil,
- }
-}
-
-// recordReq is the command to record data related to multiple measures
-// at once.
-type recordReq struct {
- tm *tag.Map
- ms []stats.Measurement
- attachments map[string]interface{}
- t time.Time
-}
-
-func (cmd *recordReq) handleCommand(w *worker) {
- w.mu.Lock()
- defer w.mu.Unlock()
- for _, m := range cmd.ms {
- if (m == stats.Measurement{}) { // not registered
- continue
- }
- ref := w.getMeasureRef(m.Measure().Name())
- for v := range ref.views {
- v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t)
- }
- }
-}
-
-// setReportingPeriodReq is the command to modify the duration between
-// reporting the collected data to the registered clients.
-type setReportingPeriodReq struct {
- d time.Duration
- c chan bool
-}
-
-func (cmd *setReportingPeriodReq) handleCommand(w *worker) {
- w.timer.Stop()
- if cmd.d <= 0 {
- w.timer = time.NewTicker(defaultReportingDuration)
- } else {
- w.timer = time.NewTicker(cmd.d)
- }
- cmd.c <- true
-}
diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go
deleted file mode 100644
index b27d1b26b..000000000
--- a/vendor/go.opencensus.io/tag/context.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "context"
-)
-
-// FromContext returns the tag map stored in the context.
-func FromContext(ctx context.Context) *Map {
- // The returned tag map shouldn't be mutated.
- ts := ctx.Value(mapCtxKey)
- if ts == nil {
- return nil
- }
- return ts.(*Map)
-}
-
-// NewContext creates a new context with the given tag map.
-// To propagate a tag map to downstream methods and downstream RPCs, add a tag map
-// to the current context. NewContext will return a copy of the current context,
-// and put the tag map into the returned one.
-// If there is already a tag map in the current context, it will be replaced with m.
-func NewContext(ctx context.Context, m *Map) context.Context {
- return context.WithValue(ctx, mapCtxKey, m)
-}
-
-type ctxKey struct{}
-
-var mapCtxKey = ctxKey{}
diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go
deleted file mode 100644
index da16b74e4..000000000
--- a/vendor/go.opencensus.io/tag/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-/*
-Package tag contains OpenCensus tags.
-
-Tags are key-value pairs. Tags provide additional cardinality to
-the OpenCensus instrumentation data.
-
-Tags can be propagated on the wire and in the same
-process via context.Context. Encode and Decode should be
-used to convert tags to and from their binary propagation form.
-*/
-package tag // import "go.opencensus.io/tag"
diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go
deleted file mode 100644
index 71ec91365..000000000
--- a/vendor/go.opencensus.io/tag/key.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-// Key represents a tag key.
-type Key struct {
- name string
-}
-
-// NewKey creates or retrieves a string key identified by name.
-// Calling NewKey more than once with the same name returns the same key.
-func NewKey(name string) (Key, error) {
- if !checkKeyName(name) {
- return Key{}, errInvalidKeyName
- }
- return Key{name: name}, nil
-}
-
-// MustNewKey returns a key with the given name, and panics if name is an invalid key name.
-func MustNewKey(name string) Key {
- k, err := NewKey(name)
- if err != nil {
- panic(err)
- }
- return k
-}
-
-// Name returns the name of the key.
-func (k Key) Name() string {
- return k.name
-}
diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go
deleted file mode 100644
index 0272ef85a..000000000
--- a/vendor/go.opencensus.io/tag/map.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "bytes"
- "context"
- "fmt"
- "sort"
-)
-
-// Tag is a key-value pair that can be propagated on the wire.
-type Tag struct {
- Key Key
- Value string
-}
-
-type tagContent struct {
- value string
- m metadatas
-}
-
-// Map is a map of tags. Use New to create a context containing
-// a new Map.
-type Map struct {
- m map[Key]tagContent
-}
-
-// Value returns the value for the key if a value for the key exists.
-func (m *Map) Value(k Key) (string, bool) {
- if m == nil {
- return "", false
- }
- v, ok := m.m[k]
- return v.value, ok
-}
-
-func (m *Map) String() string {
- if m == nil {
- return "nil"
- }
- keys := make([]Key, 0, len(m.m))
- for k := range m.m {
- keys = append(keys, k)
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
-
- var buffer bytes.Buffer
- buffer.WriteString("{ ")
- for _, k := range keys {
- buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
- }
- buffer.WriteString(" }")
- return buffer.String()
-}
-
-func (m *Map) insert(k Key, v string, md metadatas) {
- if _, ok := m.m[k]; ok {
- return
- }
- m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) update(k Key, v string, md metadatas) {
- if _, ok := m.m[k]; ok {
- m.m[k] = tagContent{value: v, m: md}
- }
-}
-
-func (m *Map) upsert(k Key, v string, md metadatas) {
- m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) delete(k Key) {
- delete(m.m, k)
-}
-
-func newMap() *Map {
- return &Map{m: make(map[Key]tagContent)}
-}
-
-// Mutator modifies a tag map.
-type Mutator interface {
- Mutate(t *Map) (*Map, error)
-}
-
-// Insert returns a mutator that inserts a
-// value associated with k. If k already exists in the tag map,
-// the mutator doesn't update the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Insert(k Key, v string, mds ...Metadata) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.insert(k, v, createMetadatas(mds...))
- return m, nil
- },
- }
-}
-
-// Update returns a mutator that updates the
-// value of the tag associated with k with v. If k doesn't
-// exist in the tag map, the mutator doesn't insert the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Update(k Key, v string, mds ...Metadata) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.update(k, v, createMetadatas(mds...))
- return m, nil
- },
- }
-}
-
-// Upsert returns a mutator that upserts the
-// value of the tag associated with k with v. It inserts the
-// value if k doesn't exist already. It mutates the value
-// if k already exists.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Upsert(k Key, v string, mds ...Metadata) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.upsert(k, v, createMetadatas(mds...))
- return m, nil
- },
- }
-}
-
-func createMetadatas(mds ...Metadata) metadatas {
- var metas metadatas
- if len(mds) > 0 {
- for _, md := range mds {
- if md != nil {
- md(&metas)
- }
- }
- } else {
- WithTTL(TTLUnlimitedPropagation)(&metas)
- }
- return metas
-}
-
-// Delete returns a mutator that deletes
-// the value associated with k.
-func Delete(k Key) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- m.delete(k)
- return m, nil
- },
- }
-}
-
-// New returns a new context that contains a tag map
-// originated from the incoming context and modified
-// with the provided mutators.
-func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
- m := newMap()
- orig := FromContext(ctx)
- if orig != nil {
- for k, v := range orig.m {
- if !checkKeyName(k.Name()) {
- return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
- }
- if !checkValue(v.value) {
- return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
- }
- m.insert(k, v.value, v.m)
- }
- }
- var err error
- for _, mod := range mutator {
- m, err = mod.Mutate(m)
- if err != nil {
- return ctx, err
- }
- }
- return NewContext(ctx, m), nil
-}
-
-// Do is similar to pprof.Do: a convenience for installing the tags
-// from the context as Go profiler labels. This allows you to
-// correlate runtime profiling with stats.
-//
-// It converts the key/values from the given map to Go profiler labels
-// and calls pprof.Do.
-//
-// Do is going to do nothing if your Go version is below 1.9.
-func Do(ctx context.Context, f func(ctx context.Context)) {
- do(ctx, f)
-}
-
-type mutator struct {
- fn func(t *Map) (*Map, error)
-}
-
-func (m *mutator) Mutate(t *Map) (*Map, error) {
- return m.fn(t)
-}
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
deleted file mode 100644
index c242e695c..000000000
--- a/vendor/go.opencensus.io/tag/map_codec.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "encoding/binary"
- "fmt"
-)
-
-// keyType defines the types of keys allowed. Currently only keyTypeString is
-// supported.
-type keyType byte
-
-const (
- keyTypeString keyType = iota
- keyTypeInt64
- keyTypeTrue
- keyTypeFalse
-
- tagsVersionID = byte(0)
-)
-
-type encoderGRPC struct {
- buf []byte
- writeIdx, readIdx int
-}
-
-// writeTagString writes the keyTypeString field ID (0) followed by the key
-// string and the value string.
-func (eg *encoderGRPC) writeTagString(k, v string) {
- eg.writeByte(byte(keyTypeString))
- eg.writeStringWithVarintLen(k)
- eg.writeStringWithVarintLen(v)
-}
-
-func (eg *encoderGRPC) writeTagUint64(k string, i uint64) {
- eg.writeByte(byte(keyTypeInt64))
- eg.writeStringWithVarintLen(k)
- eg.writeUint64(i)
-}
-
-func (eg *encoderGRPC) writeTagTrue(k string) {
- eg.writeByte(byte(keyTypeTrue))
- eg.writeStringWithVarintLen(k)
-}
-
-func (eg *encoderGRPC) writeTagFalse(k string) {
- eg.writeByte(byte(keyTypeFalse))
- eg.writeStringWithVarintLen(k)
-}
-
-func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) {
- length := len(bytes)
-
- eg.growIfRequired(binary.MaxVarintLen64 + length)
- eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
- copy(eg.buf[eg.writeIdx:], bytes)
- eg.writeIdx += length
-}
-
-func (eg *encoderGRPC) writeStringWithVarintLen(s string) {
- length := len(s)
-
- eg.growIfRequired(binary.MaxVarintLen64 + length)
- eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
- copy(eg.buf[eg.writeIdx:], s)
- eg.writeIdx += length
-}
-
-func (eg *encoderGRPC) writeByte(v byte) {
- eg.growIfRequired(1)
- eg.buf[eg.writeIdx] = v
- eg.writeIdx++
-}
-
-func (eg *encoderGRPC) writeUint32(i uint32) {
- eg.growIfRequired(4)
- binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i)
- eg.writeIdx += 4
-}
-
-func (eg *encoderGRPC) writeUint64(i uint64) {
- eg.growIfRequired(8)
- binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i)
- eg.writeIdx += 8
-}
-
-func (eg *encoderGRPC) readByte() byte {
- b := eg.buf[eg.readIdx]
- eg.readIdx++
- return b
-}
-
-func (eg *encoderGRPC) readUint32() uint32 {
- i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:])
- eg.readIdx += 4
- return i
-}
-
-func (eg *encoderGRPC) readUint64() uint64 {
- i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:])
- eg.readIdx += 8
- return i
-}
-
-func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) {
- if eg.readEnded() {
- return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
- }
- length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:])
- if valueStart <= 0 {
- return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
- }
-
- valueStart += eg.readIdx
- valueEnd := valueStart + int(length)
- if valueEnd > len(eg.buf) {
- return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf))
- }
-
- eg.readIdx = valueEnd
- return eg.buf[valueStart:valueEnd], nil
-}
-
-func (eg *encoderGRPC) readStringWithVarintLen() (string, error) {
- bytes, err := eg.readBytesWithVarintLen()
- if err != nil {
- return "", err
- }
- return string(bytes), nil
-}
-
-func (eg *encoderGRPC) growIfRequired(expected int) {
- if len(eg.buf)-eg.writeIdx < expected {
- tmp := make([]byte, 2*(len(eg.buf)+1)+expected)
- copy(tmp, eg.buf)
- eg.buf = tmp
- }
-}
-
-func (eg *encoderGRPC) readEnded() bool {
- return eg.readIdx >= len(eg.buf)
-}
-
-func (eg *encoderGRPC) bytes() []byte {
- return eg.buf[:eg.writeIdx]
-}
-
-// Encode encodes the tag map into a []byte. It is useful to propagate
-// the tag map on the wire in binary format.
-func Encode(m *Map) []byte {
- if m == nil {
- return nil
- }
- eg := &encoderGRPC{
- buf: make([]byte, len(m.m)),
- }
- eg.writeByte(tagsVersionID)
- for k, v := range m.m {
- if v.m.ttl.ttl == valueTTLUnlimitedPropagation {
- eg.writeByte(byte(keyTypeString))
- eg.writeStringWithVarintLen(k.name)
- eg.writeBytesWithVarintLen([]byte(v.value))
- }
- }
- return eg.bytes()
-}
-
-// Decode decodes the given []byte into a tag map.
-func Decode(bytes []byte) (*Map, error) {
- ts := newMap()
- err := DecodeEach(bytes, ts.upsert)
- if err != nil {
- // no partial failures
- return nil, err
- }
- return ts, nil
-}
-
-// DecodeEach decodes the given serialized tag map, calling fn for each
-// tag key and value decoded.
-func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error {
- eg := &encoderGRPC{
- buf: bytes,
- }
- if len(eg.buf) == 0 {
- return nil
- }
-
- version := eg.readByte()
- if version > tagsVersionID {
- return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
- }
-
- for !eg.readEnded() {
- typ := keyType(eg.readByte())
-
- if typ != keyTypeString {
- return fmt.Errorf("cannot decode: invalid key type: %q", typ)
- }
-
- k, err := eg.readBytesWithVarintLen()
- if err != nil {
- return err
- }
-
- v, err := eg.readBytesWithVarintLen()
- if err != nil {
- return err
- }
-
- key, err := NewKey(string(k))
- if err != nil {
- return err
- }
- val := string(v)
- if !checkValue(val) {
- return errInvalidValue
- }
- fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation)))
- }
- return nil
-}
diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go
deleted file mode 100644
index 6571a583e..000000000
--- a/vendor/go.opencensus.io/tag/metadata.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-const (
- // valueTTLNoPropagation prevents tag from propagating.
- valueTTLNoPropagation = 0
-
- // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops.
- valueTTLUnlimitedPropagation = -1
-)
-
-// TTL is metadata that specifies the number of hops a tag can propagate.
-// Details about TTL metadata are specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata
-type TTL struct {
- ttl int
-}
-
-var (
- // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops.
- TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation}
-
- // TTLNoPropagation is TTL metadata that prevents tag from propagating.
- TTLNoPropagation = TTL{ttl: valueTTLNoPropagation}
-)
-
-type metadatas struct {
- ttl TTL
-}
-
-// Metadata applies metadatas specified by the function.
-type Metadata func(*metadatas)
-
-// WithTTL applies metadata with provided ttl.
-func WithTTL(ttl TTL) Metadata {
- return func(m *metadatas) {
- m.ttl = ttl
- }
-}
diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go
deleted file mode 100644
index 8fb17226f..000000000
--- a/vendor/go.opencensus.io/tag/profile_19.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package tag
-
-import (
- "context"
- "runtime/pprof"
-)
-
-func do(ctx context.Context, f func(ctx context.Context)) {
-	m := FromContext(ctx)
-	if m == nil {
-		// No tag map in the context; run f without extra profiler labels.
-		f(ctx)
-		return
-	}
-	keyvals := make([]string, 0, 2*len(m.m))
- for k, v := range m.m {
- keyvals = append(keyvals, k.Name(), v.value)
- }
- pprof.Do(ctx, pprof.Labels(keyvals...), f)
-}
diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go
deleted file mode 100644
index e28cf13cd..000000000
--- a/vendor/go.opencensus.io/tag/profile_not19.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package tag
-
-import "context"
-
-func do(ctx context.Context, f func(ctx context.Context)) {
- f(ctx)
-}
diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go
deleted file mode 100644
index 0939fc674..000000000
--- a/vendor/go.opencensus.io/tag/validate.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tag
-
-import "errors"
-
-const (
- maxKeyLength = 255
-
-	// valid key/value characters are restricted to the US-ASCII subset (range 0x20 (' ') to 0x7e ('~')).
- validKeyValueMin = 32
- validKeyValueMax = 126
-)
-
-var (
- errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters")
- errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters")
-)
-
-func checkKeyName(name string) bool {
- if len(name) == 0 {
- return false
- }
- if len(name) > maxKeyLength {
- return false
- }
- return isASCII(name)
-}
-
-func isASCII(s string) bool {
- for _, c := range s {
- if (c < validKeyValueMin) || (c > validKeyValueMax) {
- return false
- }
- }
- return true
-}
-
-func checkValue(v string) bool {
- if len(v) > maxKeyLength {
- return false
- }
- return isASCII(v)
-}
diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go
deleted file mode 100644
index c8e26ed63..000000000
--- a/vendor/go.opencensus.io/trace/basetypes.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "fmt"
- "time"
-)
-
-type (
- // TraceID is a 16-byte identifier for a set of spans.
- TraceID [16]byte
-
- // SpanID is an 8-byte identifier for a single span.
- SpanID [8]byte
-)
-
-func (t TraceID) String() string {
- return fmt.Sprintf("%02x", t[:])
-}
-
-func (s SpanID) String() string {
- return fmt.Sprintf("%02x", s[:])
-}
-
-// Annotation represents a text annotation with a set of attributes and a timestamp.
-type Annotation struct {
- Time time.Time
- Message string
- Attributes map[string]interface{}
-}
-
-// Attribute represents a key-value pair on a span, link or annotation.
-// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute.
-type Attribute struct {
- key string
- value interface{}
-}
-
-// Key returns the attribute's key
-func (a *Attribute) Key() string {
- return a.key
-}
-
-// Value returns the attribute's value
-func (a *Attribute) Value() interface{} {
- return a.value
-}
-
-// BoolAttribute returns a bool-valued attribute.
-func BoolAttribute(key string, value bool) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// Int64Attribute returns an int64-valued attribute.
-func Int64Attribute(key string, value int64) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// Float64Attribute returns a float64-valued attribute.
-func Float64Attribute(key string, value float64) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// StringAttribute returns a string-valued attribute.
-func StringAttribute(key string, value string) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// LinkType specifies the relationship between the span that had the link
-// added, and the linked span.
-type LinkType int32
-
-// LinkType values.
-const (
- LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
- LinkTypeChild // The linked span is a child of the current span.
- LinkTypeParent // The linked span is the parent of the current span.
-)
-
-// Link represents a reference from one span to another span.
-type Link struct {
- TraceID TraceID
- SpanID SpanID
- Type LinkType
- // Attributes is a set of attributes on the link.
- Attributes map[string]interface{}
-}
-
-// MessageEventType specifies the type of message event.
-type MessageEventType int32
-
-// MessageEventType values.
-const (
- MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
- MessageEventTypeSent // Indicates a sent RPC message.
- MessageEventTypeRecv // Indicates a received RPC message.
-)
-
-// MessageEvent represents an event describing a message sent or received on the network.
-type MessageEvent struct {
- Time time.Time
- EventType MessageEventType
- MessageID int64
- UncompressedByteSize int64
- CompressedByteSize int64
-}
-
-// Status is the status of a Span.
-type Status struct {
- // Code is a status code. Zero indicates success.
- //
- // If Code will be propagated to Google APIs, it ideally should be a value from
- // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
- Code int32
- Message string
-}
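Because Attribute's fields are unexported, the typed constructors above are the only way to build attributes. A sketch of attaching them to a span via AddAttributes, which is defined on Span elsewhere in this package (the attribute names here are invented for illustration):

    span.AddAttributes(
        trace.StringAttribute("peer.service", "cart"),
        trace.Int64Attribute("retry.count", 3),
        trace.BoolAttribute("cache.hit", true),
        trace.Float64Attribute("sample.rate", 0.25),
    )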
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
deleted file mode 100644
index 775f8274f..000000000
--- a/vendor/go.opencensus.io/trace/config.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
-
- "go.opencensus.io/trace/internal"
-)
-
-// Config represents the global tracing configuration.
-type Config struct {
- // DefaultSampler is the default sampler used when creating new spans.
- DefaultSampler Sampler
-
- // IDGenerator is for internal use only.
- IDGenerator internal.IDGenerator
-
- // MaxAnnotationEventsPerSpan is the max number of annotation events per span.
- MaxAnnotationEventsPerSpan int
-
- // MaxMessageEventsPerSpan is the max number of message events per span.
- MaxMessageEventsPerSpan int
-
- // MaxAttributesPerSpan is the max number of attributes per span.
- MaxAttributesPerSpan int
-
- // MaxLinksPerSpan is the max number of links per span.
- MaxLinksPerSpan int
-}
-
-var configWriteMu sync.Mutex
-
-const (
- // DefaultMaxAnnotationEventsPerSpan is the default max number of annotation events per span.
- DefaultMaxAnnotationEventsPerSpan = 32
-
- // DefaultMaxMessageEventsPerSpan is the default max number of message events per span.
- DefaultMaxMessageEventsPerSpan = 128
-
- // DefaultMaxAttributesPerSpan is the default max number of attributes per span.
- DefaultMaxAttributesPerSpan = 32
-
- // DefaultMaxLinksPerSpan is the default max number of links per span.
- DefaultMaxLinksPerSpan = 32
-)
-
-// ApplyConfig applies changes to the global tracing configuration.
-//
-// Fields not provided in the given config are preserved.
-func ApplyConfig(cfg Config) {
- configWriteMu.Lock()
- defer configWriteMu.Unlock()
- c := *config.Load().(*Config)
- if cfg.DefaultSampler != nil {
- c.DefaultSampler = cfg.DefaultSampler
- }
- if cfg.IDGenerator != nil {
- c.IDGenerator = cfg.IDGenerator
- }
- if cfg.MaxAnnotationEventsPerSpan > 0 {
- c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan
- }
- if cfg.MaxMessageEventsPerSpan > 0 {
- c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan
- }
- if cfg.MaxAttributesPerSpan > 0 {
- c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan
- }
- if cfg.MaxLinksPerSpan > 0 {
- c.MaxLinksPerSpan = cfg.MaxLinksPerSpan
- }
- config.Store(&c)
-}
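Since ApplyConfig only copies fields that are set (a non-nil sampler or generator, a positive limit), a partial Config acts as a targeted override. A sketch, with an arbitrary limit of 64:

    // Raise only the attribute cap; the sampler, ID generator, and the
    // other per-span limits keep their current values.
    trace.ApplyConfig(trace.Config{MaxAttributesPerSpan: 64})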
diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go
deleted file mode 100644
index 7a1616a55..000000000
--- a/vendor/go.opencensus.io/trace/doc.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package trace contains support for OpenCensus distributed tracing.
-
-The following assumes a basic familiarity with OpenCensus concepts.
-See http://opencensus.io
-
-# Exporting Traces
-
-To export collected tracing data, register at least one exporter. You can use
-one of the provided exporters or write your own.
-
- trace.RegisterExporter(exporter)
-
-By default, traces will be sampled relatively rarely. To change the sampling
-frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
-to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
-
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
-
-Be careful about using trace.AlwaysSample in a production application with
-significant traffic: a new trace will be started and exported for every request.
-
-# Adding Spans to a Trace
-
-A trace consists of a tree of spans. In Go, the current span is carried in a
-context.Context.
-
-It is common to want to capture all the activity of a function call in a span. For
-this to work, the function must take a context.Context as a parameter. Add these two
-lines to the top of the function:
-
- ctx, span := trace.StartSpan(ctx, "example.com/Run")
- defer span.End()
-
-StartSpan will create a new top-level span if the context
-doesn't contain another span; otherwise it will create a child span.
-*/
-package trace // import "go.opencensus.io/trace"
diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go
deleted file mode 100644
index ffc264f23..000000000
--- a/vendor/go.opencensus.io/trace/evictedqueue.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-type evictedQueue struct {
- queue []interface{}
- capacity int
- droppedCount int
-}
-
-func newEvictedQueue(capacity int) *evictedQueue {
- eq := &evictedQueue{
- capacity: capacity,
- queue: make([]interface{}, 0),
- }
-
- return eq
-}
-
-func (eq *evictedQueue) add(value interface{}) {
- if len(eq.queue) == eq.capacity {
- eq.queue = eq.queue[1:]
- eq.droppedCount++
- }
- eq.queue = append(eq.queue, value)
-}
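evictedQueue is a plain FIFO buffer: once it reaches capacity, each add drops the oldest element and increments droppedCount. A sketch of the semantics (the type is unexported, so this is for exposition only):

    q := newEvictedQueue(2)
    q.add("a")
    q.add("b")
    q.add("c") // "a" is evicted
    // q.queue is now ["b", "c"] and q.droppedCount == 1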
diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go
deleted file mode 100644
index e0d9a4b99..000000000
--- a/vendor/go.opencensus.io/trace/export.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Exporter is a type for functions that receive sampled trace spans.
-//
-// The ExportSpan method should be safe for concurrent use and should return
-// quickly; if an Exporter takes a significant amount of time to process a
-// SpanData, that work should be done on another goroutine.
-//
-// The SpanData should not be modified, but a pointer to it can be kept.
-type Exporter interface {
- ExportSpan(s *SpanData)
-}
-
-type exportersMap map[Exporter]struct{}
-
-var (
- exporterMu sync.Mutex
- exporters atomic.Value
-)
-
-// RegisterExporter adds to the list of Exporters that will receive sampled
-// trace spans.
-//
-// Binaries can register exporters; libraries shouldn't.
-func RegisterExporter(e Exporter) {
- exporterMu.Lock()
- new := make(exportersMap)
- if old, ok := exporters.Load().(exportersMap); ok {
- for k, v := range old {
- new[k] = v
- }
- }
- new[e] = struct{}{}
- exporters.Store(new)
- exporterMu.Unlock()
-}
-
-// UnregisterExporter removes the given Exporter from the list of Exporters
-// that receive sampled trace spans.
-func UnregisterExporter(e Exporter) {
- exporterMu.Lock()
- new := make(exportersMap)
- if old, ok := exporters.Load().(exportersMap); ok {
- for k, v := range old {
- new[k] = v
- }
- }
- delete(new, e)
- exporters.Store(new)
- exporterMu.Unlock()
-}
-
-// SpanData contains all the information collected by a Span.
-type SpanData struct {
- SpanContext
- ParentSpanID SpanID
- SpanKind int
- Name string
- StartTime time.Time
- // The wall clock time of EndTime will be adjusted to always be offset
- // from StartTime by the duration of the span.
- EndTime time.Time
- // The values of Attributes each have type string, bool, or int64.
- Attributes map[string]interface{}
- Annotations []Annotation
- MessageEvents []MessageEvent
- Status
- Links []Link
- HasRemoteParent bool
- DroppedAttributeCount int
- DroppedAnnotationCount int
- DroppedMessageEventCount int
- DroppedLinkCount int
-
- // ChildSpanCount holds the number of child spans created for this span.
- ChildSpanCount int
-}
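Any type with an ExportSpan(*SpanData) method satisfies Exporter. A minimal sketch (logExporter is an invented name, and the standard log package is assumed; a real exporter should hand the SpanData off to another goroutine rather than doing slow work inline):

    type logExporter struct{}

    func (logExporter) ExportSpan(sd *trace.SpanData) {
        log.Printf("span %s took %v", sd.Name, sd.EndTime.Sub(sd.StartTime))
    }

    // Registered once at startup, by the binary rather than by a library:
    trace.RegisterExporter(logExporter{})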
diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go
deleted file mode 100644
index 7e808d8f3..000000000
--- a/vendor/go.opencensus.io/trace/internal/internal.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides trace internals.
-package internal
-
-// IDGenerator allows custom generators for TraceID and SpanID.
-type IDGenerator interface {
- NewTraceID() [16]byte
- NewSpanID() [8]byte
-}
diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go
deleted file mode 100644
index 80095a5f6..000000000
--- a/vendor/go.opencensus.io/trace/lrumap.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "github.com/golang/groupcache/lru"
-)
-
-// A simple lru.Cache wrapper that tracks the keys of the current contents and
-// the cumulative number of evicted items.
-type lruMap struct {
- cacheKeys map[lru.Key]bool
- cache *lru.Cache
- droppedCount int
-}
-
-func newLruMap(size int) *lruMap {
- lm := &lruMap{
- cacheKeys: make(map[lru.Key]bool),
- cache: lru.New(size),
- droppedCount: 0,
- }
- lm.cache.OnEvicted = func(key lru.Key, value interface{}) {
- delete(lm.cacheKeys, key)
- lm.droppedCount++
- }
- return lm
-}
-
-func (lm lruMap) len() int {
- return lm.cache.Len()
-}
-
-func (lm lruMap) keys() []interface{} {
- keys := make([]interface{}, 0, len(lm.cacheKeys))
- for k := range lm.cacheKeys {
- keys = append(keys, k)
- }
- return keys
-}
-
-func (lm *lruMap) add(key, value interface{}) {
- lm.cacheKeys[lru.Key(key)] = true
- lm.cache.Add(lru.Key(key), value)
-}
-
-func (lm *lruMap) get(key interface{}) (interface{}, bool) {
- return lm.cache.Get(key)
-}
diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go
deleted file mode 100644
index 1eb190a96..000000000
--- a/vendor/go.opencensus.io/trace/propagation/propagation.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package propagation implements the binary trace context format.
-package propagation // import "go.opencensus.io/trace/propagation"
-
-// TODO: link to external spec document.
-
-// BinaryFormat format:
-//
-// Binary value: <version_id><version_format>
-// version_id: 1 byte representing the version id.
-//
-// For version_id = 0:
-//
-// version_format: <field><field>
-// field_format: <field_id><field_format>
-//
-// Fields:
-//
-// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id.
-// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id.
-// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options.
-//
-// Fields MUST be encoded using the field id order (smaller to higher).
-//
-// Valid value example:
-//
-// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97,
-// 98, 99, 100, 101, 102, 103, 104, 2, 1}
-//
-// version_id = 0;
-// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}
-// span_id = {97, 98, 99, 100, 101, 102, 103, 104};
-// trace_options = {1};
-
-import (
- "net/http"
-
- "go.opencensus.io/trace"
-)
-
-// Binary returns the binary format representation of a SpanContext.
-//
-// If sc is the zero value, Binary returns nil.
-func Binary(sc trace.SpanContext) []byte {
- if sc == (trace.SpanContext{}) {
- return nil
- }
- var b [29]byte
- copy(b[2:18], sc.TraceID[:])
- b[18] = 1
- copy(b[19:27], sc.SpanID[:])
- b[27] = 2
- b[28] = uint8(sc.TraceOptions)
- return b[:]
-}
-
-// FromBinary returns the SpanContext represented by b.
-//
-// If b has an unsupported version ID or contains no TraceID, FromBinary
-// returns with ok==false.
-func FromBinary(b []byte) (sc trace.SpanContext, ok bool) {
- if len(b) == 0 || b[0] != 0 {
- return trace.SpanContext{}, false
- }
- b = b[1:]
- if len(b) >= 17 && b[0] == 0 {
- copy(sc.TraceID[:], b[1:17])
- b = b[17:]
- } else {
- return trace.SpanContext{}, false
- }
- if len(b) >= 9 && b[0] == 1 {
- copy(sc.SpanID[:], b[1:9])
- b = b[9:]
- }
- if len(b) >= 2 && b[0] == 2 {
- sc.TraceOptions = trace.TraceOptions(b[1])
- }
- return sc, true
-}
-
-// HTTPFormat implementations propagate span contexts
-// in HTTP requests.
-//
-// SpanContextFromRequest extracts a span context from incoming
-// requests.
-//
-// SpanContextToRequest modifies the given request to include the given
-// span context.
-type HTTPFormat interface {
- SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool)
- SpanContextToRequest(sc trace.SpanContext, req *http.Request)
-}
-
-// TODO(jbd): Find a more representative but short name for HTTPFormat.
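For a non-zero SpanContext the version-0 encoding is always 29 bytes: one version byte plus three field-id-prefixed fields (1+16, 1+8, and 1+1 bytes). A round-trip sketch:

    buf := propagation.Binary(sc) // nil when sc is the zero SpanContext
    sc2, ok := propagation.FromBinary(buf)
    // ok is false for an empty buffer or an unknown version byte; otherwise
    // sc2 carries the same TraceID, SpanID, and TraceOptions as sc.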
diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go
deleted file mode 100644
index 71c10f9e3..000000000
--- a/vendor/go.opencensus.io/trace/sampling.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "encoding/binary"
-)
-
-const defaultSamplingProbability = 1e-4
-
-// Sampler decides whether a trace should be sampled and exported.
-type Sampler func(SamplingParameters) SamplingDecision
-
-// SamplingParameters contains the values passed to a Sampler.
-type SamplingParameters struct {
- ParentContext SpanContext
- TraceID TraceID
- SpanID SpanID
- Name string
- HasRemoteParent bool
-}
-
-// SamplingDecision is the value returned by a Sampler.
-type SamplingDecision struct {
- Sample bool
-}
-
-// ProbabilitySampler returns a Sampler that samples a given fraction of traces.
-//
-// It also samples spans whose parents are sampled.
-func ProbabilitySampler(fraction float64) Sampler {
- if !(fraction >= 0) {
- fraction = 0
- } else if fraction >= 1 {
- return AlwaysSample()
- }
-
- traceIDUpperBound := uint64(fraction * (1 << 63))
- return Sampler(func(p SamplingParameters) SamplingDecision {
- if p.ParentContext.IsSampled() {
- return SamplingDecision{Sample: true}
- }
- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
- return SamplingDecision{Sample: x < traceIDUpperBound}
- })
-}
-
-// AlwaysSample returns a Sampler that samples every trace.
-// Be careful about using this sampler in a production application with
-// significant traffic: a new trace will be started and exported for every
-// request.
-func AlwaysSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: true}
- }
-}
-
-// NeverSample returns a Sampler that samples no traces.
-func NeverSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: false}
- }
-}
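Because a Sampler is just a function, samplers compose naturally. A hypothetical example that always keeps spans whose name starts with "/debug" and otherwise defers to a 1% probability (the prefix and rate are invented; the standard strings package is assumed):

    onePercent := trace.ProbabilitySampler(0.01)
    debugSampler := trace.Sampler(func(p trace.SamplingParameters) trace.SamplingDecision {
        if strings.HasPrefix(p.Name, "/debug") {
            return trace.SamplingDecision{Sample: true}
        }
        return onePercent(p)
    })
    trace.ApplyConfig(trace.Config{DefaultSampler: debugSampler})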
diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go
deleted file mode 100644
index fbabad34c..000000000
--- a/vendor/go.opencensus.io/trace/spanbucket.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "time"
-)
-
-// samplePeriod is the minimum time between accepting spans in a single bucket.
-const samplePeriod = time.Second
-
-// defaultLatencies contains the default latency bucket bounds.
-// TODO: consider defaults, make configurable
-var defaultLatencies = [...]time.Duration{
- 10 * time.Microsecond,
- 100 * time.Microsecond,
- time.Millisecond,
- 10 * time.Millisecond,
- 100 * time.Millisecond,
- time.Second,
- 10 * time.Second,
- time.Minute,
-}
-
-// bucket is a container for a set of spans for a particular error code or latency range.
-type bucket struct {
- nextTime time.Time // next time we can accept a span
- buffer []*SpanData // circular buffer of spans
- nextIndex int // location next SpanData should be placed in buffer
- overflow bool // whether the circular buffer has wrapped around
-}
-
-func makeBucket(bufferSize int) bucket {
- return bucket{
- buffer: make([]*SpanData, bufferSize),
- }
-}
-
-// add adds a span to the bucket, if nextTime has been reached.
-func (b *bucket) add(s *SpanData) {
- if s.EndTime.Before(b.nextTime) {
- return
- }
- if len(b.buffer) == 0 {
- return
- }
- b.nextTime = s.EndTime.Add(samplePeriod)
- b.buffer[b.nextIndex] = s
- b.nextIndex++
- if b.nextIndex == len(b.buffer) {
- b.nextIndex = 0
- b.overflow = true
- }
-}
-
-// size returns the number of spans in the bucket.
-func (b *bucket) size() int {
- if b.overflow {
- return len(b.buffer)
- }
- return b.nextIndex
-}
-
-// span returns the ith span in the bucket.
-func (b *bucket) span(i int) *SpanData {
- if !b.overflow {
- return b.buffer[i]
- }
- if i < len(b.buffer)-b.nextIndex {
- return b.buffer[b.nextIndex+i]
- }
- return b.buffer[b.nextIndex+i-len(b.buffer)]
-}
-
-// resize changes the size of the bucket to n, keeping up to n existing spans.
-func (b *bucket) resize(n int) {
- cur := b.size()
- newBuffer := make([]*SpanData, n)
- if cur < n {
- for i := 0; i < cur; i++ {
- newBuffer[i] = b.span(i)
- }
- b.buffer = newBuffer
- b.nextIndex = cur
- b.overflow = false
- return
- }
- for i := 0; i < n; i++ {
- newBuffer[i] = b.span(i + cur - n)
- }
- b.buffer = newBuffer
- b.nextIndex = 0
- b.overflow = true
-}
-
-// latencyBucket returns the appropriate bucket number for a given latency.
-func latencyBucket(latency time.Duration) int {
- i := 0
- for i < len(defaultLatencies) && latency >= defaultLatencies[i] {
- i++
- }
- return i
-}
-
-// latencyBucketBounds returns the lower and upper bounds for a latency bucket
-// number.
-//
-// The lower bound is inclusive; the upper bound is exclusive (except for the
-// last bucket).
-func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) {
- if index == 0 {
- return 0, defaultLatencies[index]
- }
- if index == len(defaultLatencies) {
- return defaultLatencies[index-1], 1<<63 - 1
- }
- return defaultLatencies[index-1], defaultLatencies[index]
-}
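latencyBucket returns the index of the half-open range a latency falls into, with one extra open-ended bucket past the largest bound. Illustrating with the helpers above (unexported, for exposition only):

    latencyBucket(5 * time.Millisecond) // 3: falls in [1ms, 10ms)
    latencyBucketBounds(3)              // (1ms, 10ms)
    latencyBucket(2 * time.Minute)      // 8: the open-ended final bucket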
diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go
deleted file mode 100644
index e601f76f2..000000000
--- a/vendor/go.opencensus.io/trace/spanstore.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "time"
-
- "go.opencensus.io/internal"
-)
-
-const (
- maxBucketSize = 100000
- defaultBucketSize = 10
-)
-
-var (
- ssmu sync.RWMutex // protects spanStores
- spanStores = make(map[string]*spanStore)
-)
-
-// This exists purely to avoid exposing internal methods used by z-Pages externally.
-type internalOnly struct{}
-
-func init() {
- //TODO(#412): remove
- internal.Trace = &internalOnly{}
-}
-
-// ReportActiveSpans returns the active spans for the given name.
-func (i internalOnly) ReportActiveSpans(name string) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for activeSpan := range s.active {
- if s, ok := activeSpan.(*span); ok {
- out = append(out, s.makeSpanData())
- }
- }
- return out
-}
-
-// ReportSpansByError returns a sample of error spans.
-//
-// If code is nonzero, only spans with that status code are returned.
-func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- if code != 0 {
- if b, ok := s.errors[code]; ok {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- } else {
- for _, b := range s.errors {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- }
- return out
-}
-
-// ConfigureBucketSizes sets the number of spans to keep per latency and error
-// bucket for different span names.
-func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) {
- for _, bc := range bcs {
- latencyBucketSize := bc.MaxRequestsSucceeded
- if latencyBucketSize < 0 {
- latencyBucketSize = 0
- }
- if latencyBucketSize > maxBucketSize {
- latencyBucketSize = maxBucketSize
- }
- errorBucketSize := bc.MaxRequestsErrors
- if errorBucketSize < 0 {
- errorBucketSize = 0
- }
- if errorBucketSize > maxBucketSize {
- errorBucketSize = maxBucketSize
- }
- spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize)
- }
-}
-
-// ReportSpansPerMethod returns a summary of what spans are being stored for each span name.
-func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary {
- out := make(map[string]internal.PerMethodSummary)
- ssmu.RLock()
- defer ssmu.RUnlock()
- for name, s := range spanStores {
- s.mu.Lock()
- p := internal.PerMethodSummary{
- Active: len(s.active),
- }
- for code, b := range s.errors {
- p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{
- ErrorCode: code,
- Size: b.size(),
- })
- }
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{
- MinLatency: min,
- MaxLatency: max,
- Size: b.size(),
- })
- }
- s.mu.Unlock()
- out[name] = p
- }
- return out
-}
-
-// ReportSpansByLatency returns a sample of successful spans.
-//
-// minLatency is the minimum latency of spans to be returned.
-// maxLatency, if nonzero, is the maximum latency of spans to be returned.
-func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- if i+1 != len(s.latency) && max <= minLatency {
- continue
- }
- if maxLatency != 0 && maxLatency < min {
- continue
- }
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- if minLatency != 0 || maxLatency != 0 {
- d := sd.EndTime.Sub(sd.StartTime)
- if d < minLatency {
- continue
- }
- if maxLatency != 0 && d > maxLatency {
- continue
- }
- }
- out = append(out, sd)
- }
- }
- return out
-}
-
-// spanStore keeps track of spans stored for a particular span name.
-//
-// It contains all active spans; a sample of spans for failed requests,
-// categorized by error code; and a sample of spans for successful requests,
-// bucketed by latency.
-type spanStore struct {
- mu sync.Mutex // protects everything below.
- active map[SpanInterface]struct{}
- errors map[int32]*bucket
- latency []bucket
- maxSpansPerErrorBucket int
-}
-
-// newSpanStore creates a span store.
-func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore {
- s := &spanStore{
- active: make(map[SpanInterface]struct{}),
- latency: make([]bucket, len(defaultLatencies)+1),
- maxSpansPerErrorBucket: errorBucketSize,
- }
- for i := range s.latency {
- s.latency[i] = makeBucket(latencyBucketSize)
- }
- return s
-}
-
-// spanStoreForName returns the spanStore for the given name.
-//
-// It returns nil if it doesn't exist.
-func spanStoreForName(name string) *spanStore {
- var s *spanStore
- ssmu.RLock()
- s, _ = spanStores[name]
- ssmu.RUnlock()
- return s
-}
-
-// spanStoreForNameCreateIfNew returns the spanStore for the given name.
-//
-// It creates one if it doesn't exist.
-func spanStoreForNameCreateIfNew(name string) *spanStore {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- return s
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- return s
- }
- s = newSpanStore(name, defaultBucketSize, defaultBucketSize)
- spanStores[name] = s
- return s
-}
-
-// spanStoreSetSize resizes the spanStore for the given name.
-//
-// It creates one if it doesn't exist.
-func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- s = newSpanStore(name, latencyBucketSize, errorBucketSize)
- spanStores[name] = s
-}
-
-func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) {
- s.mu.Lock()
- for i := range s.latency {
- s.latency[i].resize(latencyBucketSize)
- }
- for _, b := range s.errors {
- b.resize(errorBucketSize)
- }
- s.maxSpansPerErrorBucket = errorBucketSize
- s.mu.Unlock()
-}
-
-// add adds a span to the active bucket of the spanStore.
-func (s *spanStore) add(span SpanInterface) {
- s.mu.Lock()
- s.active[span] = struct{}{}
- s.mu.Unlock()
-}
-
-// finished removes a span from the active set, and adds a corresponding
-// SpanData to a latency or error bucket.
-func (s *spanStore) finished(span SpanInterface, sd *SpanData) {
- latency := sd.EndTime.Sub(sd.StartTime)
- if latency < 0 {
- latency = 0
- }
- code := sd.Status.Code
-
- s.mu.Lock()
- delete(s.active, span)
- if code == 0 {
- s.latency[latencyBucket(latency)].add(sd)
- } else {
- if s.errors == nil {
- s.errors = make(map[int32]*bucket)
- }
- if b := s.errors[code]; b != nil {
- b.add(sd)
- } else {
- b := makeBucket(s.maxSpansPerErrorBucket)
- s.errors[code] = &b
- b.add(sd)
- }
- }
- s.mu.Unlock()
-}
diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go
deleted file mode 100644
index ec60effd1..000000000
--- a/vendor/go.opencensus.io/trace/status_codes.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-// Status codes for use with Span.SetStatus. These correspond to the status
-// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-const (
- StatusCodeOK = 0
- StatusCodeCancelled = 1
- StatusCodeUnknown = 2
- StatusCodeInvalidArgument = 3
- StatusCodeDeadlineExceeded = 4
- StatusCodeNotFound = 5
- StatusCodeAlreadyExists = 6
- StatusCodePermissionDenied = 7
- StatusCodeResourceExhausted = 8
- StatusCodeFailedPrecondition = 9
- StatusCodeAborted = 10
- StatusCodeOutOfRange = 11
- StatusCodeUnimplemented = 12
- StatusCodeInternal = 13
- StatusCodeUnavailable = 14
- StatusCodeDataLoss = 15
- StatusCodeUnauthenticated = 16
-)
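These values plug directly into Status.Code: zero (StatusCodeOK) marks success, and any other code routes the finished span into the matching error bucket of the span store. A usage sketch (the message text is invented):

    span.SetStatus(trace.Status{
        Code:    trace.StatusCodeNotFound,
        Message: "user 42 does not exist",
    })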
diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go
deleted file mode 100644
index 861df9d39..000000000
--- a/vendor/go.opencensus.io/trace/trace.go
+++ /dev/null
@@ -1,595 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
- crand "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/rand"
- "sync"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/internal"
- "go.opencensus.io/trace/tracestate"
-)
-
-type tracer struct{}
-
-var _ Tracer = &tracer{}
-
-// Span represents a span of a trace. It has an associated SpanContext, and
-// stores data accumulated while the span is active.
-//
-// Ideally users should interact with Spans by calling the functions in this
-// package that take a Context parameter.
-type span struct {
- // data contains information recorded about the span.
- //
- // It will be non-nil if we are exporting the span or recording events for it.
- // Otherwise, data is nil, and the Span is simply a carrier for the
- // SpanContext, so that the trace ID is propagated.
- data *SpanData
- mu sync.Mutex // protects the contents of *data (but not the pointer value).
- spanContext SpanContext
-
- // lruAttributes are capped at the configured limit. When the capacity is
- // reached, the least recently used entry is removed to make room for a new one.
- lruAttributes *lruMap
-
- // annotations are stored in FIFO queue capped by configured limit.
- annotations *evictedQueue
-
- // messageEvents are stored in FIFO queue capped by configured limit.
- messageEvents *evictedQueue
-
- // links are stored in FIFO queue capped by configured limit.
- links *evictedQueue
-
- // spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
- *spanStore
- endOnce sync.Once
-
- executionTracerTaskEnd func() // ends the execution tracer span
-}
-
-// IsRecordingEvents returns true if events are being recorded for this span.
-// Use this check to avoid computing expensive annotations when they will never
-// be used.
-func (s *span) IsRecordingEvents() bool {
- if s == nil {
- return false
- }
- return s.data != nil
-}
-
-// TraceOptions contains options associated with a trace span.
-type TraceOptions uint32
-
-// IsSampled returns true if the span will be exported.
-func (sc SpanContext) IsSampled() bool {
- return sc.TraceOptions.IsSampled()
-}
-
-// setIsSampled sets the TraceOptions bit that determines whether the span will be exported.
-func (sc *SpanContext) setIsSampled(sampled bool) {
- if sampled {
- sc.TraceOptions |= 1
- } else {
- sc.TraceOptions &= ^TraceOptions(1)
- }
-}
-
-// IsSampled returns true if the span will be exported.
-func (t TraceOptions) IsSampled() bool {
- return t&1 == 1
-}
-
-// SpanContext contains the state that must propagate across process boundaries.
-//
-// SpanContext is not an implementation of context.Context.
-// TODO: add reference to external Census docs for SpanContext.
-type SpanContext struct {
- TraceID TraceID
- SpanID SpanID
- TraceOptions TraceOptions
- Tracestate *tracestate.Tracestate
-}
-
-type contextKey struct{}
-
-// FromContext returns the Span stored in a context, or nil if there isn't one.
-func (t *tracer) FromContext(ctx context.Context) *Span {
- s, _ := ctx.Value(contextKey{}).(*Span)
- return s
-}
-
-// NewContext returns a new context with the given Span attached.
-func (t *tracer) NewContext(parent context.Context, s *Span) context.Context {
- return context.WithValue(parent, contextKey{}, s)
-}
-
-// All available span kinds. Span kind must be one of these values.
-const (
- SpanKindUnspecified = iota
- SpanKindServer
- SpanKindClient
-)
-
-// StartOptions contains options concerning how a span is started.
-type StartOptions struct {
- // Sampler to consult for this Span. If provided, it is always consulted.
- //
- // If not provided, then the behavior differs based on whether
- // the parent of this Span is remote, local, or there is no parent.
- // In the case of a remote parent or no parent, the
- // default sampler (see Config) will be consulted. Otherwise,
- // when there is a non-remote parent, no new sampling decision will be made:
- // we will preserve the sampling of the parent.
- Sampler Sampler
-
- // SpanKind represents the kind of a span. If none is set,
- // SpanKindUnspecified is used.
- SpanKind int
-}
-
-// StartOption applies changes to StartOptions.
-type StartOption func(*StartOptions)
-
-// WithSpanKind causes new spans to be created with the given kind.
-func WithSpanKind(spanKind int) StartOption {
- return func(o *StartOptions) {
- o.SpanKind = spanKind
- }
-}
-
-// WithSampler causes new spans to be created with the given sampler.
-// Otherwise, the global default sampler is used.
-func WithSampler(sampler Sampler) StartOption {
- return func(o *StartOptions) {
- o.Sampler = sampler
- }
-}
-
-// StartSpan starts a new child span of the current span in the context. If
-// there is no span in the context, creates a new trace and span.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- var parent SpanContext
- if p := t.FromContext(ctx); p != nil {
- if ps, ok := p.internal.(*span); ok {
- ps.addChild()
- }
- parent = p.SpanContext()
- }
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
-
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- extSpan := NewSpan(span)
- return t.NewContext(ctx, extSpan), extSpan
-}
-
-// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
-//
-// If the incoming context contains a parent, that parent is ignored. StartSpanWithRemoteParent is
-// preferred for cases where the parent is propagated via an incoming request.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts)
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- extSpan := NewSpan(span)
- return t.NewContext(ctx, extSpan), extSpan
-}
-
-func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span {
- s := &span{}
- s.spanContext = parent
-
- cfg := config.Load().(*Config)
- if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok {
- // lazy initialization
- gen.init()
- }
-
- if !hasParent {
- s.spanContext.TraceID = cfg.IDGenerator.NewTraceID()
- }
- s.spanContext.SpanID = cfg.IDGenerator.NewSpanID()
- sampler := cfg.DefaultSampler
-
- if !hasParent || remoteParent || o.Sampler != nil {
- // If this span is the child of a local span and no Sampler is set in the
- // options, keep the parent's TraceOptions.
- //
- // Otherwise, consult the Sampler in the options if it is non-nil, otherwise
- // the default sampler.
- if o.Sampler != nil {
- sampler = o.Sampler
- }
- s.spanContext.setIsSampled(sampler(SamplingParameters{
- ParentContext: parent,
- TraceID: s.spanContext.TraceID,
- SpanID: s.spanContext.SpanID,
- Name: name,
- HasRemoteParent: remoteParent}).Sample)
- }
-
- if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() {
- return s
- }
-
- s.data = &SpanData{
- SpanContext: s.spanContext,
- StartTime: time.Now(),
- SpanKind: o.SpanKind,
- Name: name,
- HasRemoteParent: remoteParent,
- }
- s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan)
- s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan)
- s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan)
- s.links = newEvictedQueue(cfg.MaxLinksPerSpan)
-
- if hasParent {
- s.data.ParentSpanID = parent.SpanID
- }
- if internal.LocalSpanStoreEnabled {
- ss := spanStoreForNameCreateIfNew(name)
- if ss != nil {
- s.spanStore = ss
- ss.add(s)
- }
- }
-
- return s
-}
-
-// End ends the span.
-func (s *span) End() {
- if s == nil {
- return
- }
- if s.executionTracerTaskEnd != nil {
- s.executionTracerTaskEnd()
- }
- if !s.IsRecordingEvents() {
- return
- }
- s.endOnce.Do(func() {
- exp, _ := exporters.Load().(exportersMap)
- mustExport := s.spanContext.IsSampled() && len(exp) > 0
- if s.spanStore != nil || mustExport {
- sd := s.makeSpanData()
- sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
- if s.spanStore != nil {
- s.spanStore.finished(s, sd)
- }
- if mustExport {
- for e := range exp {
- e.ExportSpan(sd)
- }
- }
- }
- })
-}
-
-// makeSpanData produces a SpanData representing the current state of the Span.
-// It requires that s.data is non-nil.
-func (s *span) makeSpanData() *SpanData {
- var sd SpanData
- s.mu.Lock()
- sd = *s.data
- if s.lruAttributes.len() > 0 {
- sd.Attributes = s.lruAttributesToAttributeMap()
- sd.DroppedAttributeCount = s.lruAttributes.droppedCount
- }
- if len(s.annotations.queue) > 0 {
- sd.Annotations = s.interfaceArrayToAnnotationArray()
- sd.DroppedAnnotationCount = s.annotations.droppedCount
- }
- if len(s.messageEvents.queue) > 0 {
- sd.MessageEvents = s.interfaceArrayToMessageEventArray()
- sd.DroppedMessageEventCount = s.messageEvents.droppedCount
- }
- if len(s.links.queue) > 0 {
- sd.Links = s.interfaceArrayToLinksArray()
- sd.DroppedLinkCount = s.links.droppedCount
- }
- s.mu.Unlock()
- return &sd
-}
-
-// SpanContext returns the SpanContext of the span.
-func (s *span) SpanContext() SpanContext {
- if s == nil {
- return SpanContext{}
- }
- return s.spanContext
-}
-
-// SetName sets the name of the span, if it is recording events.
-func (s *span) SetName(name string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Name = name
- s.mu.Unlock()
-}
-
-// SetStatus sets the status of the span, if it is recording events.
-func (s *span) SetStatus(status Status) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Status = status
- s.mu.Unlock()
-}
-
-func (s *span) interfaceArrayToLinksArray() []Link {
- linksArr := make([]Link, 0, len(s.links.queue))
- for _, value := range s.links.queue {
- linksArr = append(linksArr, value.(Link))
- }
- return linksArr
-}
-
-func (s *span) interfaceArrayToMessageEventArray() []MessageEvent {
- messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue))
- for _, value := range s.messageEvents.queue {
- messageEventArr = append(messageEventArr, value.(MessageEvent))
- }
- return messageEventArr
-}
-
-func (s *span) interfaceArrayToAnnotationArray() []Annotation {
- annotationArr := make([]Annotation, 0, len(s.annotations.queue))
- for _, value := range s.annotations.queue {
- annotationArr = append(annotationArr, value.(Annotation))
- }
- return annotationArr
-}
-
-func (s *span) lruAttributesToAttributeMap() map[string]interface{} {
- attributes := make(map[string]interface{}, s.lruAttributes.len())
- for _, key := range s.lruAttributes.keys() {
- value, ok := s.lruAttributes.get(key)
- if ok {
- keyStr := key.(string)
- attributes[keyStr] = value
- }
- }
- return attributes
-}
-
-func (s *span) copyToCappedAttributes(attributes []Attribute) {
- for _, a := range attributes {
- s.lruAttributes.add(a.key, a.value)
- }
-}
-
-func (s *span) addChild() {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.ChildSpanCount++
- s.mu.Unlock()
-}
-
-// AddAttributes sets attributes in the span.
-//
-// Existing attributes whose keys appear in the attributes parameter are overwritten.
-func (s *span) AddAttributes(attributes ...Attribute) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.copyToCappedAttributes(attributes)
- s.mu.Unlock()
-}
-
-func (s *span) printStringInternal(attributes []Attribute, str string) {
- now := time.Now()
- var am map[string]interface{}
- if len(attributes) != 0 {
- am = make(map[string]interface{}, len(attributes))
- for _, attr := range attributes {
- am[attr.key] = attr.value
- }
- }
- s.mu.Lock()
- s.annotations.add(Annotation{
- Time: now,
- Message: str,
- Attributes: am,
- })
- s.mu.Unlock()
-}
-
-// Annotate adds an annotation with attributes.
-// Attributes can be nil.
-func (s *span) Annotate(attributes []Attribute, str string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.printStringInternal(attributes, str)
-}
-
-// Annotatef adds an annotation with attributes.
-func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
- if !s.IsRecordingEvents() {
- return
- }
- s.printStringInternal(attributes, fmt.Sprintf(format, a...))
-}
-
-// AddMessageSendEvent adds a message send event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows a message to be matched between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.messageEvents.add(MessageEvent{
- Time: now,
- EventType: MessageEventTypeSent,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
-
-// AddMessageReceiveEvent adds a message receive event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows a message to be matched between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.messageEvents.add(MessageEvent{
- Time: now,
- EventType: MessageEventTypeRecv,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
-
-// AddLink adds a link to the span.
-func (s *span) AddLink(l Link) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.links.add(l)
- s.mu.Unlock()
-}
-
-func (s *span) String() string {
- if s == nil {
- return ""
- }
- if s.data == nil {
- return fmt.Sprintf("span %s", s.spanContext.SpanID)
- }
- s.mu.Lock()
- str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name)
- s.mu.Unlock()
- return str
-}
-
-var config atomic.Value // access atomically
-
-func init() {
- config.Store(&Config{
- DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
- IDGenerator: &defaultIDGenerator{},
- MaxAttributesPerSpan: DefaultMaxAttributesPerSpan,
- MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
- MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan,
- MaxLinksPerSpan: DefaultMaxLinksPerSpan,
- })
-}
-
-type defaultIDGenerator struct {
- sync.Mutex
-
- // Please keep these as the first fields
- // so that these 8 byte fields will be aligned on addresses
- // divisible by 8, on both 32-bit and 64-bit machines when
- // performing atomic increments and accesses.
- // See:
- // * https://github.com/census-instrumentation/opencensus-go/issues/587
- // * https://github.com/census-instrumentation/opencensus-go/issues/865
- // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
- nextSpanID uint64
- spanIDInc uint64
-
- traceIDAdd [2]uint64
- traceIDRand *rand.Rand
-
- initOnce sync.Once
-}
-
-// init initializes the generator on the first call to avoid consuming entropy
-// unnecessarily.
-func (gen *defaultIDGenerator) init() {
- gen.initOnce.Do(func() {
- // initialize traceID and spanID generators.
- var rngSeed int64
- for _, p := range []interface{}{
- &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
- } {
- binary.Read(crand.Reader, binary.LittleEndian, p)
- }
- gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
- gen.spanIDInc |= 1
- })
-}
-
-// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *defaultIDGenerator) NewSpanID() [8]byte {
- var id uint64
- for id == 0 {
- id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
- }
- var sid [8]byte
- binary.LittleEndian.PutUint64(sid[:], id)
- return sid
-}
-
-// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
-// The generator's lock is acquired internally while the random values are read.
-func (gen *defaultIDGenerator) NewTraceID() [16]byte {
- var tid [16]byte
- // Construct the trace ID from two outputs of traceIDRand, with a constant
- // added to each half for additional entropy.
- gen.Lock()
- binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
- binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
- gen.Unlock()
- return tid
-}
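Putting this file's pieces together, a typical server-side call site might look like the following sketch (the span name is invented):

    ctx, span := trace.StartSpan(ctx, "example.com/Checkout",
        trace.WithSpanKind(trace.SpanKindServer),
        trace.WithSampler(trace.AlwaysSample()),
    )
    defer span.End()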
diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go
deleted file mode 100644
index 9e2c3a999..000000000
--- a/vendor/go.opencensus.io/trace/trace_api.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2020, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
-)
-
-// DefaultTracer is the tracer used when package-level exported functions are invoked.
-var DefaultTracer Tracer = &tracer{}
-
-// Tracer can start spans and access context functions.
-type Tracer interface {
-
- // StartSpan starts a new child span of the current span in the context. If
- // there is no span in the context, creates a new trace and span.
- //
- // Returned context contains the newly created span. You can use it to
- // propagate the returned span in process.
- StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span)
-
- // StartSpanWithRemoteParent starts a new child span of the span from the given parent.
- //
- // If the incoming context contains a parent, that parent is ignored. StartSpanWithRemoteParent is
- // preferred for cases where the parent is propagated via an incoming request.
- //
- // Returned context contains the newly created span. You can use it to
- // propagate the returned span in process.
- StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span)
-
- // FromContext returns the Span stored in a context, or nil if there isn't one.
- FromContext(ctx context.Context) *Span
-
- // NewContext returns a new context with the given Span attached.
- NewContext(parent context.Context, s *Span) context.Context
-}
-
-// StartSpan starts a new child span of the current span in the context. If
-// there is no span in the context, creates a new trace and span.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
- return DefaultTracer.StartSpan(ctx, name, o...)
-}
-
-// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
-//
-// If the incoming context contains a parent, that parent is ignored. StartSpanWithRemoteParent is
-// preferred for cases where the parent is propagated via an incoming request.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
- return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...)
-}
-
-// FromContext returns the Span stored in a context, or nil if there isn't one;
-// a nil *Span is safe to use and is not recording events.
-func FromContext(ctx context.Context) *Span {
- return DefaultTracer.FromContext(ctx)
-}
-
-// NewContext returns a new context with the given Span attached.
-func NewContext(parent context.Context, s *Span) context.Context {
- return DefaultTracer.NewContext(parent, s)
-}
-
-// SpanInterface represents a span of a trace. It has an associated SpanContext, and
-// stores data accumulated while the span is active.
-//
-// Ideally users should interact with Spans by calling the functions in this
-// package that take a Context parameter.
-type SpanInterface interface {
-
- // IsRecordingEvents returns true if events are being recorded for this span.
- // Use this check to avoid computing expensive annotations when they will never
- // be used.
- IsRecordingEvents() bool
-
- // End ends the span.
- End()
-
- // SpanContext returns the SpanContext of the span.
- SpanContext() SpanContext
-
- // SetName sets the name of the span, if it is recording events.
- SetName(name string)
-
- // SetStatus sets the status of the span, if it is recording events.
- SetStatus(status Status)
-
- // AddAttributes sets attributes in the span.
- //
- // Existing attributes whose keys appear in the attributes parameter are overwritten.
- AddAttributes(attributes ...Attribute)
-
- // Annotate adds an annotation with attributes.
- // Attributes can be nil.
- Annotate(attributes []Attribute, str string)
-
- // Annotatef adds an annotation with attributes.
- Annotatef(attributes []Attribute, format string, a ...interface{})
-
- // AddMessageSendEvent adds a message send event to the span.
- //
- // messageID is an identifier for the message, which is recommended to be
- // unique in this span and the same between the send event and the receive
- // event (this allows a message to be matched between the sender and receiver).
- // For example, this could be a sequence id.
- AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64)
-
- // AddMessageReceiveEvent adds a message receive event to the span.
- //
- // messageID is an identifier for the message, which is recommended to be
- // unique in this span and the same between the send event and the receive
- // event (this allows a message to be matched between the sender and receiver).
- // For example, this could be a sequence id.
- AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64)
-
- // AddLink adds a link to the span.
- AddLink(l Link)
-
- // String prints a string representation of a span.
- String() string
-}
-
-// NewSpan is a convenience function for wrapping a SpanInterface in a *Span.
-func NewSpan(s SpanInterface) *Span {
- return &Span{internal: s}
-}
-
-// Span is a struct wrapper around SpanInterface, which allows correctly handling
-// nil spans, while also allowing the SpanInterface implementation to be swapped out.
-type Span struct {
- internal SpanInterface
-}
-
-// Internal returns the underlying implementation of the Span
-func (s *Span) Internal() SpanInterface {
- return s.internal
-}
-
-// IsRecordingEvents returns true if events are being recorded for this span.
-// Use this check to avoid computing expensive annotations when they will never
-// be used.
-func (s *Span) IsRecordingEvents() bool {
- if s == nil {
- return false
- }
- return s.internal.IsRecordingEvents()
-}
-
-// End ends the span.
-func (s *Span) End() {
- if s == nil {
- return
- }
- s.internal.End()
-}
-
-// SpanContext returns the SpanContext of the span.
-func (s *Span) SpanContext() SpanContext {
- if s == nil {
- return SpanContext{}
- }
- return s.internal.SpanContext()
-}
-
-// SetName sets the name of the span, if it is recording events.
-func (s *Span) SetName(name string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.SetName(name)
-}
-
-// SetStatus sets the status of the span, if it is recording events.
-func (s *Span) SetStatus(status Status) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.SetStatus(status)
-}
-
-// AddAttributes sets attributes in the span.
-//
-// Existing attributes whose keys appear in the attributes parameter are overwritten.
-func (s *Span) AddAttributes(attributes ...Attribute) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddAttributes(attributes...)
-}
-
-// Annotate adds an annotation with attributes.
-// Attributes can be nil.
-func (s *Span) Annotate(attributes []Attribute, str string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.Annotate(attributes, str)
-}
-
-// Annotatef adds an annotation with attributes.
-func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.Annotatef(attributes, format, a...)
-}
-
-// AddMessageSendEvent adds a message send event to the span.
-//
-// messageID is an identifier for the message. It is recommended to be
-// unique within this span and the same between the send event and the receive
-// event (this allows the message to be matched between sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize)
-}
-
-// AddMessageReceiveEvent adds a message receive event to the span.
-//
-// messageID is an identifier for the message. It is recommended to be
-// unique within this span and the same between the send event and the receive
-// event (this allows the message to be matched between sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize)
-}
-
-// AddLink adds a link to the span.
-func (s *Span) AddLink(l Link) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddLink(l)
-}
-
-// String returns a string representation of the span.
-func (s *Span) String() string {
- if s == nil {
- return ""
- }
- return s.internal.String()
-}
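
For context, the wrapper being removed above implements the nil-safe facade idiom: every method guards the receiver, so a nil *Span is a usable no-op and callers never need nil checks. A minimal standalone sketch of the idiom (types trimmed down for illustration, not the vendored code itself):

```go
package main

import "fmt"

// SpanInterface is a trimmed stand-in for the vendored interface.
type SpanInterface interface{ End() }

// Span wraps SpanInterface so that a nil *Span is safe to use.
type Span struct{ internal SpanInterface }

// End is a no-op on a nil receiver, mirroring the wrapper above.
func (s *Span) End() {
	if s == nil {
		return
	}
	s.internal.End()
}

func main() {
	var s *Span // nil, e.g. no span was found in a context
	s.End()     // legal in Go: methods may run on nil pointer receivers
	fmt.Println("no panic")
}
```
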
diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go
deleted file mode 100644
index b8fc1e495..000000000
--- a/vendor/go.opencensus.io/trace/trace_go11.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.11
-// +build go1.11
-
-package trace
-
-import (
- "context"
- t "runtime/trace"
-)
-
-func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
- if !t.IsEnabled() {
- // Avoid additional overhead if
- // runtime/trace is not enabled.
- return ctx, func() {}
- }
- nctx, task := t.NewTask(ctx, name)
- return nctx, task.End
-}
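
The build-tagged file above only forwards to the standard library. For reference, a standalone sketch of the same runtime/trace task API it wraps (the file and task names are illustrative):

```go
package main

import (
	"context"
	"os"
	rt "runtime/trace"
)

func main() {
	// Start an execution trace; the output is later inspected with
	// `go tool trace trace.out`.
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := rt.Start(f); err != nil {
		panic(err)
	}
	defer rt.Stop()

	// NewTask groups related regions under one logical operation,
	// which is what startExecutionTracerTask did per span.
	ctx, task := rt.NewTask(context.Background(), "handleRequest")
	defer task.End()

	rt.WithRegion(ctx, "parse", func() {
		// Work here is attributed to the "parse" region in the trace UI.
	})
}
```
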
diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go
deleted file mode 100644
index da488fc87..000000000
--- a/vendor/go.opencensus.io/trace/trace_nongo11.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.11
-// +build !go1.11
-
-package trace
-
-import (
- "context"
-)
-
-func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
- return ctx, func() {}
-}
diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
deleted file mode 100644
index 2d6c713eb..000000000
--- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tracestate implements support for the Tracestate header of the
-// W3C TraceContext propagation format.
-package tracestate
-
-import (
- "fmt"
- "regexp"
-)
-
-const (
- keyMaxSize = 256
- valueMaxSize = 256
- maxKeyValuePairs = 32
-)
-
-const (
- keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
- keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
- keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
-)
-
-var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
-var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)
-
-// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different
-// vendors to propagate additional information and inter-operate with their legacy ID formats.
-type Tracestate struct {
- entries []Entry
-}
-
-// Entry represents one key-value pair in a list of key-value pair of Tracestate.
-type Entry struct {
- // Key is an opaque string of up to 256 printable characters. It MUST begin with a lowercase letter,
- // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
- // forward slashes /.
- Key string
-
- // Value is an opaque string of up to 256 printable ASCII (RFC 0020) characters (i.e., the
- // range 0x20 to 0x7E), except comma (,) and equals (=).
- Value string
-}
-
-// Entries returns a slice of Entry.
-func (ts *Tracestate) Entries() []Entry {
- if ts == nil {
- return nil
- }
- return ts.entries
-}
-
-func (ts *Tracestate) remove(key string) *Entry {
- for index, entry := range ts.entries {
- if entry.Key == key {
- ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
- return &entry
- }
- }
- return nil
-}
-
-func (ts *Tracestate) add(entries []Entry) error {
- for _, entry := range entries {
- ts.remove(entry.Key)
- }
- if len(ts.entries)+len(entries) > maxKeyValuePairs {
- return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
- len(entries), len(ts.entries), maxKeyValuePairs)
- }
- ts.entries = append(entries, ts.entries...)
- return nil
-}
-
-func isValid(entry Entry) bool {
- return keyValidationRegExp.MatchString(entry.Key) &&
- valueValidationRegExp.MatchString(entry.Value)
-}
-
-func containsDuplicateKey(entries ...Entry) (string, bool) {
- keyMap := make(map[string]int)
- for _, entry := range entries {
- if _, ok := keyMap[entry.Key]; ok {
- return entry.Key, true
- }
- keyMap[entry.Key] = 1
- }
- return "", false
-}
-
-func areEntriesValid(entries ...Entry) (*Entry, bool) {
- for _, entry := range entries {
- if !isValid(entry) {
- return &entry, false
- }
- }
- return nil, true
-}
-
-// New creates a Tracestate object from a parent and/or entries (key-value pairs).
-// Entries from the parent are copied if present. The entries passed to this function
-// are inserted in front of those copied from the parent. If an entry copied from the
-// parent has the same key as one of the given entries, then the entry copied
-// from the parent is removed. See the add func.
-//
-// An error is returned with a nil Tracestate if
-// 1. one or more of the given entries is invalid.
-// 2. two or more of the given entries have the same key.
-// 3. the number of entries combined from the parent and the given entries exceeds
-// maxKeyValuePairs (a duplicate entry is counted only once).
-func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) {
- if parent == nil && len(entries) == 0 {
- return nil, nil
- }
- if entry, ok := areEntriesValid(entries...); !ok {
- return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value)
- }
-
- if key, duplicate := containsDuplicateKey(entries...); duplicate {
- return nil, fmt.Errorf("contains duplicate keys (%s)", key)
- }
-
- tracestate := Tracestate{}
-
- if parent != nil && len(parent.entries) > 0 {
- tracestate.entries = append([]Entry{}, parent.entries...)
- }
-
- err := tracestate.add(entries)
- if err != nil {
- return nil, err
- }
- return &tracestate, nil
-}
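
For reference, the removed constructor composes like this; the vendor keys and values below are illustrative but satisfy the validation regexps above:

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	parent, err := tracestate.New(nil, tracestate.Entry{Key: "vendor1", Value: "opaque1"})
	if err != nil {
		panic(err)
	}

	// Entries passed to New are inserted in front of those copied from
	// the parent; a matching key in the parent would be removed first.
	ts, err := tracestate.New(parent, tracestate.Entry{Key: "vendor2", Value: "opaque2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Entries()) // [{vendor2 opaque2} {vendor1 opaque1}]
}
```
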
diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
new file mode 100644
index 000000000..773c9b643
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# Contributing to go.opentelemetry.io/auto/sdk
+
+The `go.opentelemetry.io/auto/sdk` module is a purpose-built OpenTelemetry SDK.
+It is designed to be:
+
+0. An OpenTelemetry compliant SDK
+1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
+2. Lightweight
+3. User-friendly
+
+These design choices are listed in the order of their importance.
+
+The primary design goal of this module is to be an OpenTelemetry SDK.
+This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.
+
+Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
+The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP.
+This in turn ensures that the serialized form is compatible with other OpenTelemetry systems, and that the auto-instrumentation can use those systems to deserialize any telemetry it is sent.
+
+Outside of these first two goals, the intended use becomes relevant.
+This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
+Because of this, this package must not add unnecessary dependencies to that API.
+Ideally, it adds none.
+It also needs to operate efficiently.
+
+Finally, this module is designed to be user-friendly for Go developers.
+It hides complexity in order to provide simpler APIs when the previous goals can all still be met.
diff --git a/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE
similarity index 100%
rename from vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE
rename to vendor/go.opentelemetry.io/auto/sdk/LICENSE
diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md
new file mode 100644
index 000000000..088d19a6c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md
@@ -0,0 +1,15 @@
+# Versioning
+
+This document describes the versioning policy for this module.
+This policy is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules).
+ * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used.
+ * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+ * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path.
+
+* GitHub releases will be made for all releases.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go
new file mode 100644
index 000000000..ad73d8cb9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package sdk provides an auto-instrumentable OpenTelemetry SDK.
+
+An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the
+process running this SDK. In that case, all telemetry the SDK produces will be
+processed and handled by that [go.opentelemetry.io/auto.Instrumentation].
+
+By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to
+auto-instrument the SDK, the SDK will not generate any telemetry.
+*/
+package sdk
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
new file mode 100644
index 000000000..af6ef171f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// Attr is a key-value pair.
+type Attr struct {
+ Key string `json:"key,omitempty"`
+ Value Value `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+ return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+ return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+ return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+ return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+ return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
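
As a quick orientation to the constructors above, a test-style sketch; it assumes it sits inside the (internal) telemetry package, and the attribute names and values are made up:

```go
package telemetry

import "testing"

// TestAttrSketch is a hypothetical test exercising the constructors above.
func TestAttrSketch(t *testing.T) {
	a := Map("resource",
		String("service.name", "checkout"),
		Int("retries", 3),
		Bool("sampled", true),
		Slice("ports", Int64Value(80), Int64Value(443)),
	)
	b := Map("resource",
		String("service.name", "checkout"),
		Int("retries", 3),
		Bool("sampled", true),
		Slice("ports", Int64Value(80), Int64Value(443)),
	)
	// Attr.Equal compares keys and values structurally; map entries are
	// compared order-insensitively (see Value.Equal in value.go).
	if !a.Equal(b) {
		t.Error("expected structurally equal attributes")
	}
}
```
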
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
new file mode 100644
index 000000000..949e2165c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
new file mode 100644
index 000000000..e854d7e84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+const (
+ traceIDSize = 16
+ spanIDSize = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+ return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID is all zeros.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+ if tid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+ *tid = [traceIDSize]byte{}
+ return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+ return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID is all zeros.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+ if sid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+ *sid = [spanIDSize]byte{}
+ return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+ // Plus 2 quote chars at the start and end.
+ hexLen := hex.EncodedLen(len(id)) + 2
+
+ b := make([]byte, hexLen)
+ hex.Encode(b[1:hexLen-1], id)
+ b[0], b[hexLen-1] = '"', '"'
+
+ return b, nil
+}
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+ if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+ src = src[1 : l-1]
+ }
+ nLen := len(src)
+ if nLen == 0 {
+ return nil
+ }
+
+ if len(dst) != hex.DecodedLen(nLen) {
+ return errors.New("invalid length for ID")
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+ }
+ return nil
+}
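
A sketch of the quoted-hex round-trip these methods implement, again in-package since telemetry is internal; the ID bytes are the W3C trace-context example values:

```go
package telemetry

import (
	"encoding/json"
	"testing"
)

// TestTraceIDRoundTrip is a hypothetical test of the JSON (un)marshaling above.
func TestTraceIDRoundTrip(t *testing.T) {
	tid := TraceID{
		0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6,
		0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
	}

	b, err := json.Marshal(tid)
	if err != nil {
		t.Fatal(err)
	}
	// 16 bytes encode to 32 hex characters, enclosed in quotes.
	if string(b) != `"4bf92f3577b34da6a3ce929d0e0e4736"` {
		t.Fatalf("unexpected encoding: %s", b)
	}

	var got TraceID
	if err := json.Unmarshal(b, &got); err != nil {
		t.Fatal(err)
	}
	if got != tid {
		t.Fatal("round trip mismatch")
	}
}
```
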
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
new file mode 100644
index 000000000..29e629d66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedInt, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ } else {
+ var parsedInt int64
+ if err := json.Unmarshal(data, &parsedInt); err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ }
+ return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedUint, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ } else {
+ var parsedUint uint64
+ if err := json.Unmarshal(data, &parsedUint); err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ }
+ return nil
+}
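
The reason these types exist: the protobuf JSON mapping encodes 64-bit integers as strings, while hand-written payloads often use bare numbers. An in-package sketch showing that both forms decode:

```go
package telemetry

import (
	"encoding/json"
	"testing"
)

// TestProtoInt64 is a hypothetical test of the dual string/number decoding.
func TestProtoInt64(t *testing.T) {
	for _, data := range []string{`"1700000000000000000"`, `1700000000000000000`} {
		var i protoInt64
		if err := json.Unmarshal([]byte(data), &i); err != nil {
			t.Fatalf("decode %s: %v", data, err)
		}
		if i.Int64() != 1700000000000000000 {
			t.Fatalf("decode %s: got %d", data, i.Int64())
		}
	}
}
```
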
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
new file mode 100644
index 000000000..cecad8bae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Resource information.
+type Resource struct {
+ // Attrs are the set of attributes that describe the resource. Attribute
+ // keys MUST be unique (it is not allowed to have more than one attribute
+ // with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // DroppedAttrs is the number of dropped attributes. If the value
+ // is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Resource type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Resource field: %#v", keyIface)
+ }
+
+ switch key {
+ case "attributes":
+ err = decoder.Decode(&r.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&r.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
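
Per the switch above, both the protobuf-JSON camelCase spelling and the snake_case spelling of the dropped-attributes field are accepted. A small in-package sketch with made-up attribute data:

```go
package telemetry

import (
	"encoding/json"
	"testing"
)

// TestResourceDecode is a hypothetical test of the tolerant key handling.
func TestResourceDecode(t *testing.T) {
	data := []byte(`{
		"attributes": [{"key": "host.name", "value": {"stringValue": "web-1"}}],
		"dropped_attributes_count": 2
	}`)

	var r Resource
	if err := json.Unmarshal(data, &r); err != nil {
		t.Fatal(err)
	}
	if len(r.Attrs) != 1 || r.Attrs[0].Value.AsString() != "web-1" || r.DroppedAttrs != 2 {
		t.Fatalf("unexpected resource: %+v", r)
	}
}
```
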
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
new file mode 100644
index 000000000..b6f2e28d4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+ Name string `json:"name,omitempty"`
+ Version string `json:"version,omitempty"`
+ Attrs []Attr `json:"attributes,omitempty"`
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Scope type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Scope field: %#v", keyIface)
+ }
+
+ switch key {
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "version":
+ err = decoder.Decode(&s.Version)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
new file mode 100644
index 000000000..a13a6b733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -0,0 +1,456 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `json:"traceState,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `json:"name"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind SpanKind `json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // events is a collection of Event items.
+ Events []*SpanEvent `json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*SpanLink `json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+ startT := s.StartTime.UnixNano()
+ if s.StartTime.IsZero() || startT < 0 {
+ startT = 0
+ }
+
+ endT := s.EndTime.UnixNano()
+ if s.EndTime.IsZero() || endT < 0 {
+ endT = 0
+ }
+
+ // Override non-empty default SpanID marshal and omitempty.
+ var parentSpanId string
+ if !s.ParentSpanID.IsEmpty() {
+ b := make([]byte, hex.EncodedLen(spanIDSize))
+ hex.Encode(b, s.ParentSpanID[:])
+ parentSpanId = string(b)
+ }
+
+ type Alias Span
+ return json.Marshal(struct {
+ Alias
+ ParentSpanID string `json:"parentSpanId,omitempty"`
+ StartTime uint64 `json:"startTimeUnixNano,omitempty"`
+ EndTime uint64 `json:"endTimeUnixNano,omitempty"`
+ }{
+ Alias: Alias(s),
+ ParentSpanID: parentSpanId,
+ StartTime: uint64(startT),
+ EndTime: uint64(endT),
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Span type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Span field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&s.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&s.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&s.TraceState)
+ case "parentSpanId", "parent_span_id":
+ err = decoder.Decode(&s.ParentSpanID)
+ case "flags":
+ err = decoder.Decode(&s.Flags)
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "kind":
+ err = decoder.Decode(&s.Kind)
+ case "startTimeUnixNano", "start_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ s.StartTime = time.Unix(0, int64(val.Uint64()))
+ case "endTimeUnixNano", "end_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ s.EndTime = time.Unix(0, int64(val.Uint64()))
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ case "events":
+ err = decoder.Decode(&s.Events)
+ case "droppedEventsCount", "dropped_events_count":
+ err = decoder.Decode(&s.DroppedEvents)
+ case "links":
+ err = decoder.Decode(&s.Links)
+ case "droppedLinksCount", "dropped_links_count":
+ err = decoder.Decode(&s.DroppedLinks)
+ case "status":
+ err = decoder.Decode(&s.Status)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // Bits 0-7 are used for trace flags.
+ SpanFlagsTraceFlagsMask SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+ // SpanFlagsContextIsRemoteMask indicates the span or link is remote.
+ SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // Indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ SpanKindInternal SpanKind = 1
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ SpanKindServer SpanKind = 2
+ // Indicates that the span describes a request to some remote service.
+ SpanKindClient SpanKind = 3
+ // Indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans. A PRODUCER span ends when the message was accepted
+ // by the broker while the logical processing of the message might span a much longer time.
+ SpanKindProducer SpanKind = 4
+ // Indicates that the span describes a consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ SpanKindConsumer SpanKind = 5
+)
+
+// Event is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs.
+type SpanEvent struct {
+ // time_unix_nano is the time the event occurred.
+ Time time.Time `json:"timeUnixNano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+ t := e.Time.UnixNano()
+ if e.Time.IsZero() || t < 0 {
+ t = 0
+ }
+
+ type Alias SpanEvent
+ return json.Marshal(struct {
+ Alias
+ Time uint64 `json:"timeUnixNano,omitempty"`
+ }{
+ Alias: Alias(e),
+ Time: uint64(t),
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanEvent type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
+ }
+
+ switch key {
+ case "timeUnixNano", "time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ se.Time = time.Unix(0, int64(val.Uint64()))
+ case "name":
+ err = decoder.Decode(&se.Name)
+ case "attributes":
+ err = decoder.Decode(&se.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&se.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type SpanLink struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `json:"traceState,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanLink type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&sl.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&sl.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&sl.TraceState)
+ case "attributes":
+ err = decoder.Decode(&sl.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&sl.DroppedAttrs)
+ case "flags":
+ err = decoder.Decode(&sl.Flags)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
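
A sketch of what the MarshalJSON override above buys: timestamps serialize as integer nanoseconds rather than time.Time's RFC 3339 default, and a zero parent span ID is omitted even though SpanID is a fixed-size array that omitempty would never drop. In-package, with illustrative field values:

```go
package telemetry

import (
	"encoding/json"
	"strings"
	"testing"
	"time"
)

// TestSpanMarshal is a hypothetical test of the custom encoding above.
func TestSpanMarshal(t *testing.T) {
	s := Span{
		TraceID:   TraceID{1},
		SpanID:    SpanID{2},
		Name:      "GET /items",
		Kind:      SpanKindServer,
		StartTime: time.Unix(0, 1700000000000000000),
		EndTime:   time.Unix(0, 1700000000000000042),
	}

	b, err := json.Marshal(s)
	if err != nil {
		t.Fatal(err)
	}
	out := string(b)
	if !strings.Contains(out, `"startTimeUnixNano":1700000000000000000`) {
		t.Fatalf("missing nanosecond start time: %s", out)
	}
	if strings.Contains(out, "parentSpanId") {
		t.Fatalf("zero parent span ID should be omitted: %s", out)
	}
}
```
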
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
new file mode 100644
index 000000000..1217776ea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+ // The default status.
+ StatusCodeUnset StatusCode = 0
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
+ StatusCodeOK StatusCode = 1
+ // The Span contains an error.
+ StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+ "Unset",
+ "OK",
+ "Error",
+}
+
+func (s StatusCode) String() string {
+ if s >= 0 && int(s) < len(statusCodeStrings) {
+ return statusCodeStrings[s]
+ }
+ return ""
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ // A developer-facing human readable error message.
+ Message string `json:"message,omitempty"`
+ // The status code.
+ Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
new file mode 100644
index 000000000..69a348f0f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Traces represents the traces data that can be stored in persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid TracesData type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resourceSpans", "resource_spans":
+ err = decoder.Decode(&td.ResourceSpans)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource Resource `json:"resource"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ResourceSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resource":
+ err = decoder.Decode(&rs.Resource)
+ case "scopeSpans", "scope_spans":
+ err = decoder.Decode(&rs.ScopeSpans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&rs.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ // The instrumentation scope information for the spans in this message.
+ // Semantically when InstrumentationScope isn't set, it is equivalent to
+ // an empty instrumentation scope name (unknown).
+ Scope *Scope `json:"scope"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ScopeSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "scope":
+ err = decoder.Decode(&ss.Scope)
+ case "spans":
+ err = decoder.Decode(&ss.Spans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&ss.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
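
Putting the three levels together, a minimal OTLP/JSON document decodes through Traces → ResourceSpans → ScopeSpans → Span. An in-package sketch using the W3C example IDs and made-up names:

```go
package telemetry

import (
	"encoding/json"
	"testing"
)

// TestTracesDecode is a hypothetical end-to-end decoding test.
func TestTracesDecode(t *testing.T) {
	data := []byte(`{
		"resourceSpans": [{
			"resource": {"attributes": [{"key": "service.name", "value": {"stringValue": "checkout"}}]},
			"scopeSpans": [{
				"scope": {"name": "manual"},
				"spans": [{
					"traceId": "4bf92f3577b34da6a3ce929d0e0e4736",
					"spanId": "00f067aa0ba902b7",
					"name": "GET /items",
					"kind": 2,
					"startTimeUnixNano": "1700000000000000000",
					"endTimeUnixNano": "1700000000000000042"
				}]
			}]
		}]
	}`)

	var td Traces
	if err := json.Unmarshal(data, &td); err != nil {
		t.Fatal(err)
	}
	span := td.ResourceSpans[0].ScopeSpans[0].Spans[0]
	if span.Name != "GET /items" || span.Kind != SpanKindServer {
		t.Fatalf("unexpected span: %+v", span)
	}
}
```
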
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
new file mode 100644
index 000000000..0dd01b063
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either a ValueKind (ValueKindBool, ValueKindInt64, or
+ // ValueKindFloat64), a stringptr, a bytesptr, a sliceptr, or a mapptr.
+ // For the ValueKind cases the value of the Value is in num as described
+ // above; otherwise any contains the value wrapped in the appropriate type.
+ any any
+}
+
+type (
+ // stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for KindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for KindMap Values.
+ mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+ ValueKindEmpty ValueKind = iota
+ ValueKindBool
+ ValueKindFloat64
+ ValueKindInt64
+ ValueKindString
+ ValueKindBytes
+ ValueKindSlice
+ ValueKindMap
+)
+
+var valueKindStrings = []string{
+ "Empty",
+ "Bool",
+ "Float64",
+ "Int64",
+ "String",
+ "Bytes",
+ "Slice",
+ "Map",
+}
+
+func (k ValueKind) String() string {
+ if k >= 0 && int(k) < len(valueKindStrings) {
+ return valueKindStrings[k]
+ }
+ return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ // TODO: error handle
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != ValueKindInt64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != ValueKindBool {
+ // TODO: error handle
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != ValueKindFloat64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// KindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the Value
+// is not KindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the Value
+// is not KindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+ switch x := v.any.(type) {
+ case ValueKind:
+ return x
+ case stringptr:
+ return ValueKindString
+ case bytesptr:
+ return ValueKindBytes
+ case sliceptr:
+ return ValueKindSlice
+ case mapptr:
+ return ValueKindMap
+ default:
+ return ValueKindEmpty
+ }
+}
+
+// Empty reports whether v holds no value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case ValueKindInt64, ValueKindBool:
+ return v.num == w.num
+ case ValueKindString:
+ return v.asString() == w.asString()
+ case ValueKindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case ValueKindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case ValueKindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, Attr.Equal)
+ case ValueKindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case ValueKindEmpty:
+ return true
+ default:
+ // TODO: error handle
+ return false
+ }
+}
+
+func sortMap(m []Attr) []Attr {
+ sm := make([]Attr, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b Attr) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case ValueKindString:
+ return v.asString()
+ case ValueKindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case ValueKindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case ValueKindBool:
+ return strconv.FormatBool(v.asBool())
+ case ValueKindBytes:
+ return fmt.Sprint(v.asBytes())
+ case ValueKindMap:
+ return fmt.Sprint(v.asMap())
+ case ValueKindSlice:
+ return fmt.Sprint(v.asSlice())
+ case ValueKindEmpty:
+ return ""
+ default:
+ // Try to handle this as gracefully as possible.
+ //
+ // Don't panic here. The goal is to have developers find this first
+ // if a Kind is not handled. It is preferable to have users open an
+ // issue asking why their attributes have an "unhandled: " prefix
+ // than to say that their code is panicking.
+ return fmt.Sprintf("<unhandled: %v>", v.Kind())
+ }
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+ switch v.Kind() {
+ case ValueKindString:
+ return json.Marshal(struct {
+ Value string `json:"stringValue"`
+ }{v.asString()})
+ case ValueKindInt64:
+ return json.Marshal(struct {
+ Value string `json:"intValue"`
+ }{strconv.FormatInt(int64(v.num), 10)})
+ case ValueKindFloat64:
+ return json.Marshal(struct {
+ Value float64 `json:"doubleValue"`
+ }{v.asFloat64()})
+ case ValueKindBool:
+ return json.Marshal(struct {
+ Value bool `json:"boolValue"`
+ }{v.asBool()})
+ case ValueKindBytes:
+ return json.Marshal(struct {
+ Value []byte `json:"bytesValue"`
+ }{v.asBytes()})
+ case ValueKindMap:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Attr `json:"values"`
+ } `json:"kvlistValue"`
+ }{struct {
+ Values []Attr `json:"values"`
+ }{v.asMap()}})
+ case ValueKindSlice:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Value `json:"values"`
+ } `json:"arrayValue"`
+ }{struct {
+ Values []Value `json:"values"`
+ }{v.asSlice()}})
+ case ValueKindEmpty:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+ }
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Value type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Value key: %#v", keyIface)
+ }
+
+ switch key {
+ case "stringValue", "string_value":
+ var val string
+ err = decoder.Decode(&val)
+ *v = StringValue(val)
+ case "boolValue", "bool_value":
+ var val bool
+ err = decoder.Decode(&val)
+ *v = BoolValue(val)
+ case "intValue", "int_value":
+ var val protoInt64
+ err = decoder.Decode(&val)
+ *v = Int64Value(val.Int64())
+ case "doubleValue", "double_value":
+ var val float64
+ err = decoder.Decode(&val)
+ *v = Float64Value(val)
+ case "bytesValue", "bytes_value":
+ var val64 string
+ if err := decoder.Decode(&val64); err != nil {
+ return err
+ }
+ var val []byte
+ val, err = base64.StdEncoding.DecodeString(val64)
+ *v = BytesValue(val)
+ case "arrayValue", "array_value":
+ var val struct{ Values []Value }
+ err = decoder.Decode(&val)
+ *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value":
+ var val struct{ Values []Attr }
+ err = decoder.Decode(&val)
+ *v = MapValue(val.Values...)
+ default:
+ // Skip unknown.
+ continue
+ }
+ // Use first valid. Ignore the rest.
+ return err
+ }
+
+ // Only unknown fields. Return nil without unmarshaling any value.
+ return nil
+}
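The MarshalJSON/UnmarshalJSON pair above gives Value a lossless OTLP JSON round trip. A minimal sketch of that round trip follows; since the telemetry package is internal, it would only compile inside that package (for example, as a test file):

package telemetry

import (
	"encoding/json"
	"testing"
)

func TestValueJSONRoundTrip(t *testing.T) {
	orig := MapValue(
		Attr{Key: "enabled", Value: BoolValue(true)},
		Attr{Key: "count", Value: Int64Value(42)},
	)

	// MarshalJSON emits {"kvlistValue":{"values":[...]}} for map values.
	data, err := json.Marshal(&orig)
	if err != nil {
		t.Fatal(err)
	}

	var decoded Value
	if err := json.Unmarshal(data, &decoded); err != nil {
		t.Fatal(err)
	}
	if !decoded.Equal(orig) {
		t.Fatalf("round trip changed value: %s != %s", decoded, orig)
	}
}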
diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go
new file mode 100644
index 000000000..86babf1a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "log/slog"
+ "os"
+ "strconv"
+)
+
+// maxSpan holds the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+ // Attrs is the number of allowed attributes for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+ // that is not set, is used.
+ Attrs int
+ // AttrValueLen is the maximum attribute value length allowed for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+ // if that is not set, is used.
+ AttrValueLen int
+ // Events is the number of allowed events for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+ Events int
+ // EventAttrs is the number of allowed attributes for a span event.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+ EventAttrs int
+ // Links is the number of allowed Links for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+ Links int
+ // LinkAttrs is the number of allowed attributes for a span link.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+ LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+ return spanLimits{
+ Attrs: firstEnv(
+ 128,
+ "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+ "OTEL_ATTRIBUTE_COUNT_LIMIT",
+ ),
+ AttrValueLen: firstEnv(
+ -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ ),
+ Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+ EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+ Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+ LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+ }
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+ for _, key := range keys {
+ strV := os.Getenv(key)
+ if strV == "" {
+ continue
+ }
+
+ v, err := strconv.Atoi(strV)
+ if err == nil {
+ return v
+ }
+ slog.Warn(
+ "invalid limit environment variable",
+ "error", err,
+ "key", key,
+ "value", strV,
+ )
+ }
+
+ return defaultVal
+}
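firstEnv checks keys in order, so a span-specific variable takes precedence over the generic one. A small sketch of that precedence, written as a test inside the sdk package (firstEnv is unexported):

package sdk

import "testing"

func TestFirstEnvPrecedence(t *testing.T) {
	t.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")      // generic fallback
	t.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "32") // span-specific, checked first

	got := firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
	if got != 32 {
		t.Fatalf("want 32, got %d", got)
	}
	// The default (128) applies only when no listed key holds a valid integer.
}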
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
new file mode 100644
index 000000000..6ebea12a9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -0,0 +1,432 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+
+ "go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type span struct {
+ noop.Span
+
+ spanContext trace.SpanContext
+ sampled atomic.Bool
+
+ mu sync.Mutex
+ traces *telemetry.Traces
+ span *telemetry.Span
+}
+
+func (s *span) SpanContext() trace.SpanContext {
+ if s == nil {
+ return trace.SpanContext{}
+ }
+ // s.spanContext is immutable; there is no need to acquire the lock s.mu.
+ return s.spanContext
+}
+
+func (s *span) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+
+ return s.sampled.Load()
+}
+
+func (s *span) SetStatus(c codes.Code, msg string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.span.Status == nil {
+ s.span.Status = new(telemetry.Status)
+ }
+
+ s.span.Status.Message = msg
+
+ switch c {
+ case codes.Unset:
+ s.span.Status.Code = telemetry.StatusCodeUnset
+ case codes.Error:
+ s.span.Status.Code = telemetry.StatusCodeError
+ case codes.Ok:
+ s.span.Status.Code = telemetry.StatusCodeOK
+ }
+}
+
+func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := maxSpan.Attrs
+ if limit == 0 {
+ // No attributes allowed.
+ s.span.DroppedAttrs += uint32(len(attrs))
+ return
+ }
+
+ m := make(map[string]int)
+ for i, a := range s.span.Attrs {
+ m[a.Key] = i
+ }
+
+ for _, a := range attrs {
+ val := convAttrValue(a.Value)
+ if val.Empty() {
+ s.span.DroppedAttrs++
+ continue
+ }
+
+ if idx, ok := m[string(a.Key)]; ok {
+ s.span.Attrs[idx] = telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ }
+ } else if limit < 0 || len(s.span.Attrs) < limit {
+ s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ })
+ m[string(a.Key)] = len(s.span.Attrs) - 1
+ } else {
+ s.span.DroppedAttrs++
+ }
+ }
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ if limit == 0 {
+ return nil, uint32(len(attrs))
+ }
+
+ if limit < 0 {
+ // Unlimited.
+ return convAttrs(attrs), 0
+ }
+
+ limit = min(len(attrs), limit)
+ return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
+}
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+ if len(attrs) == 0 {
+ // Avoid allocations if not necessary.
+ return nil
+ }
+
+ out := make([]telemetry.Attr, 0, len(attrs))
+ for _, attr := range attrs {
+ key := string(attr.Key)
+ val := convAttrValue(attr.Value)
+ if val.Empty() {
+ continue
+ }
+ out = append(out, telemetry.Attr{Key: key, Value: val})
+ }
+ return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+ switch value.Type() {
+ case attribute.BOOL:
+ return telemetry.BoolValue(value.AsBool())
+ case attribute.INT64:
+ return telemetry.Int64Value(value.AsInt64())
+ case attribute.FLOAT64:
+ return telemetry.Float64Value(value.AsFloat64())
+ case attribute.STRING:
+ v := truncate(maxSpan.AttrValueLen, value.AsString())
+ return telemetry.StringValue(v)
+ case attribute.BOOLSLICE:
+ slice := value.AsBoolSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.BoolValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.INT64SLICE:
+ slice := value.AsInt64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Int64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.FLOAT64SLICE:
+ slice := value.AsFloat64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Float64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.STRINGSLICE:
+ slice := value.AsStringSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ v = truncate(maxSpan.AttrValueLen, v)
+ out = append(out, telemetry.StringValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ }
+ return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the
+// first limit valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than limit bytes, it is returned unchanged.
+// No invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order, based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
+
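The behavior described above can be illustrated with a few hypothetical inputs; truncate is unexported, so these lines only run inside the sdk package:

truncate(5, "hello world")  // "hello": capped at 5 valid characters
truncate(-1, "hello world") // "hello world": a negative limit disables truncation
truncate(3, "a\xffbcd")     // "abc": the invalid byte is dropped before counting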
+func (s *span) End(opts ...trace.SpanEndOption) {
+ if s == nil || !s.sampled.Swap(false) {
+ return
+ }
+
+ // s.end exists so the lock (s.mu) is not held while s.ended is called.
+ s.ended(s.end(opts))
+}
+
+func (s *span) end(opts []trace.SpanEndOption) []byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ cfg := trace.NewSpanEndConfig(opts...)
+ if t := cfg.Timestamp(); !t.IsZero() {
+ s.span.EndTime = cfg.Timestamp()
+ } else {
+ s.span.EndTime = time.Now()
+ }
+
+ b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+ return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*span) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *span) RecordError(err error, opts ...trace.EventOption) {
+ if s == nil || err == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := trace.NewEventConfig(opts...)
+
+ attrs := cfg.Attributes()
+ attrs = append(attrs,
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ )
+ if cfg.StackTrace() {
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *span) AddEvent(name string, opts ...trace.EventOption) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := trace.NewEventConfig(opts...)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+ limit := maxSpan.Events
+
+ if limit == 0 {
+ s.span.DroppedEvents++
+ return
+ }
+
+ if limit > 0 && len(s.span.Events) == limit {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:])
+ s.span.Events = s.span.Events[:limit-1]
+ s.span.DroppedEvents++
+ }
+
+ e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+ e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+ s.span.Events = append(s.span.Events, e)
+}
+
+func (s *span) AddLink(link trace.Link) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ l := maxSpan.Links
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if l == 0 {
+ s.span.DroppedLinks++
+ return
+ }
+
+ if l > 0 && len(s.span.Links) == l {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Links[:l-1], s.span.Links[1:])
+ s.span.Links = s.span.Links[:l-1]
+ s.span.DroppedLinks++
+ }
+
+ s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []trace.Link) []*telemetry.SpanLink {
+ out := make([]*telemetry.SpanLink, 0, len(links))
+ for _, link := range links {
+ out = append(out, convLink(link))
+ }
+ return out
+}
+
+func convLink(link trace.Link) *telemetry.SpanLink {
+ l := &telemetry.SpanLink{
+ TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
+ SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
+ TraceState: link.SpanContext.TraceState().String(),
+ Flags: uint32(link.SpanContext.TraceFlags()),
+ }
+ l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+ return l
+}
+
+func (s *span) SetName(name string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.span.Name = name
+}
+
+func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() }
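When a span is already at its event or link limit, addEvent and AddLink above drop the oldest entry in place instead of growing the slice. A hypothetical sequence, assuming span is a sampled span started by this SDK and OTEL_SPAN_EVENT_COUNT_LIMIT=2:

span.AddEvent("first")
span.AddEvent("second")
span.AddEvent("third") // "first" is shifted out of the head of the slice
// The span now records ["second", "third"] and DroppedEvents == 1.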
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
new file mode 100644
index 000000000..cbcfabde3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
@@ -0,0 +1,124 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+
+ "go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type tracer struct {
+ noop.Tracer
+
+ name, schemaURL, version string
+}
+
+var _ trace.Tracer = tracer{}
+
+func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ var psc trace.SpanContext
+ sampled := true
+ span := new(span)
+
+ // Ask eBPF for sampling decision and span context info.
+ t.start(ctx, span, &psc, &sampled, &span.spanContext)
+
+ span.sampled.Store(sampled)
+
+ ctx = trace.ContextWithSpan(ctx, span)
+
+ if sampled {
+ // Only build traces if sampled.
+ cfg := trace.NewSpanStartConfig(opts...)
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+ }
+
+ return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *tracer) start(
+ ctx context.Context,
+ spanPtr *span,
+ psc *trace.SpanContext,
+ sampled *bool,
+ sc *trace.SpanContext,
+) {
+ start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
+
+func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
+ span := &telemetry.Span{
+ TraceID: telemetry.TraceID(sc.TraceID()),
+ SpanID: telemetry.SpanID(sc.SpanID()),
+ Flags: uint32(sc.TraceFlags()),
+ TraceState: sc.TraceState().String(),
+ ParentSpanID: telemetry.SpanID(psc.SpanID()),
+ Name: name,
+ Kind: spanKind(cfg.SpanKind()),
+ }
+
+ span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+ links := cfg.Links()
+ if limit := maxSpan.Links; limit == 0 {
+ span.DroppedLinks = uint32(len(links))
+ } else {
+ if limit > 0 {
+ n := max(len(links)-limit, 0)
+ span.DroppedLinks = uint32(n)
+ links = links[n:]
+ }
+ span.Links = convLinks(links)
+ }
+
+ if t := cfg.Timestamp(); !t.IsZero() {
+ span.StartTime = cfg.Timestamp()
+ } else {
+ span.StartTime = time.Now()
+ }
+
+ return &telemetry.Traces{
+ ResourceSpans: []*telemetry.ResourceSpans{
+ {
+ ScopeSpans: []*telemetry.ScopeSpans{
+ {
+ Scope: &telemetry.Scope{
+ Name: t.name,
+ Version: t.version,
+ },
+ Spans: []*telemetry.Span{span},
+ SchemaURL: t.schemaURL,
+ },
+ },
+ },
+ },
+ }, span
+}
+
+func spanKind(kind trace.SpanKind) telemetry.SpanKind {
+ switch kind {
+ case trace.SpanKindInternal:
+ return telemetry.SpanKindInternal
+ case trace.SpanKindServer:
+ return telemetry.SpanKindServer
+ case trace.SpanKindClient:
+ return telemetry.SpanKindClient
+ case trace.SpanKindProducer:
+ return telemetry.SpanKindProducer
+ case trace.SpanKindConsumer:
+ return telemetry.SpanKindConsumer
+ }
+ return telemetry.SpanKind(0) // undefined.
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go
new file mode 100644
index 000000000..dbc477a59
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+)
+
+// TracerProvider returns an auto-instrumentable [trace.TracerProvider].
+//
+// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
+func TracerProvider() trace.TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(tracerProvider)
+
+type tracerProvider struct{ noop.TracerProvider }
+
+var _ trace.TracerProvider = tracerProvider{}
+
+func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ cfg := trace.NewTracerConfig(opts...)
+ return tracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
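A hedged usage sketch for the provider above: install it as the global TracerProvider so spans become real telemetry once an external go.opentelemetry.io/auto Instrumentation attaches to the process. The tracer name is illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/auto/sdk"
	"go.opentelemetry.io/otel"
)

func main() {
	otel.SetTracerProvider(sdk.TracerProvider())

	tracer := otel.Tracer("example.com/app")
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()
	// Until an Instrumentation attaches, this span behaves like a noop span,
	// as documented above.
}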
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/client_golang/LICENSE
rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
new file mode 100644
index 000000000..b25641c55
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Please be careful of initialization order - for example, if you change
+// the global propagator, the DefaultClient might still be using the old one.
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
+
+// Get is a convenient replacement for http.Get that adds a span around the request.
+func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Head is a convenient replacement for http.Head that adds a span around the request.
+func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Post is a convenient replacement for http.Post that adds a span around the request.
+func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", contentType)
+ return DefaultClient.Do(req)
+}
+
+// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
+func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
+ return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
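A short usage sketch for the helpers above; the URL is illustrative. Each call produces a client span via the instrumented DefaultClient:

package main

import (
	"context"
	"fmt"
	"io"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Get wraps the request in a span and sends it through DefaultClient.
	resp, err := otelhttp.Get(context.Background(), "https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}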
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
new file mode 100644
index 000000000..a83a02627
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Attribute keys that can be added to a span.
+const (
+ ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
+ ReadErrorKey = attribute.Key("http.read_error") // if an error occurred while reading a request, the string of the error (io.EOF is not recorded)
+ WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
+ WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
+)
+
+// Filter is a predicate used to determine whether a given http.Request should
+// be traced. A Filter must return true if the request should be traced.
+type Filter func(*http.Request) bool
+
+func newTracer(tp trace.TracerProvider) trace.Tracer {
+ return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version()))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
new file mode 100644
index 000000000..6bd50d4c9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -0,0 +1,211 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptrace"
+
+ "go.opentelemetry.io/otel/attribute"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// ScopeName is the instrumentation scope name.
+const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// config represents the configuration options available for the http.Handler
+// and http.Transport types.
+type config struct {
+ ServerName string
+ Tracer trace.Tracer
+ Meter metric.Meter
+ Propagators propagation.TextMapPropagator
+ SpanStartOptions []trace.SpanStartOption
+ PublicEndpoint bool
+ PublicEndpointFn func(*http.Request) bool
+ ReadEvent bool
+ WriteEvent bool
+ Filters []Filter
+ SpanNameFormatter func(string, *http.Request) string
+ ClientTrace func(context.Context) *httptrace.ClientTrace
+
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ MetricAttributesFn func(*http.Request) []attribute.KeyValue
+}
+
+// Option interface used for setting optional config properties.
+type Option interface {
+ apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+ o(c)
+}
+
+// newConfig creates a new config struct and applies opts to it.
+func newConfig(opts ...Option) *config {
+ c := &config{
+ Propagators: otel.GetTextMapPropagator(),
+ MeterProvider: otel.GetMeterProvider(),
+ }
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+
+ // Tracer is only initialized if manually specified. Otherwise, it can be passed with the tracing context.
+ if c.TracerProvider != nil {
+ c.Tracer = newTracer(c.TracerProvider)
+ }
+
+ c.Meter = c.MeterProvider.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version()),
+ )
+
+ return c
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.TracerProvider = provider
+ }
+ })
+}
+
+// WithMeterProvider specifies a meter provider to use for creating a meter.
+// If none is specified, the global provider is used.
+func WithMeterProvider(provider metric.MeterProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.MeterProvider = provider
+ }
+ })
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpoint = true
+ })
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
+func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpointFn = fn
+ })
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) Option {
+ return optionFunc(func(c *config) {
+ if ps != nil {
+ c.Propagators = ps
+ }
+ })
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+ return optionFunc(func(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+ })
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter indicates to exclude a request then the request will not be
+// traced. All filters must allow a request to be traced for a Span to be created.
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make them
+// simple and fast.
+func WithFilter(f Filter) Option {
+ return optionFunc(func(c *config) {
+ c.Filters = append(c.Filters, f)
+ })
+}
+
+type event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+ ReadEvents event = iota
+ WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
+// using the ReadBytesKey
+// - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
+// using the WriteBytesKey
+func WithMessageEvents(events ...event) Option {
+ return optionFunc(func(c *config) {
+ for _, e := range events {
+ switch e {
+ case ReadEvents:
+ c.ReadEvent = true
+ case WriteEvents:
+ c.WriteEvent = true
+ }
+ }
+ })
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
+//
+// When using [http.ServeMux] (or any middleware that sets the Pattern of [http.Request]),
+// the span name formatter will run twice: once when the span is created, and a
+// second time after the middleware runs, so the pattern can be used.
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
+ return optionFunc(func(c *config) {
+ c.SpanNameFormatter = f
+ })
+}
+
+// WithClientTrace takes a function that returns client trace instance that will be
+// applied to the requests sent through the otelhttp Transport.
+func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
+ return optionFunc(func(c *config) {
+ c.ClientTrace = f
+ })
+}
+
+// WithServerName returns an Option that sets the name of the (virtual) server
+// handling requests.
+func WithServerName(server string) Option {
+ return optionFunc(func(c *config) {
+ c.ServerName = server
+ })
+}
+
+// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
+// These attributes will be included in metrics for every request.
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
+ return optionFunc(func(c *config) {
+ c.MetricAttributesFn = metricAttributesFn
+ })
+}
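A sketch composing several of the options above with NewHandler (defined in handler.go below); the paths, server name, and handler body are illustrative:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	handler := otelhttp.NewHandler(hello, "hello",
		otelhttp.WithFilter(func(r *http.Request) bool {
			return r.URL.Path != "/healthz" // don't trace health checks
		}),
		otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
		otelhttp.WithServerName("api.example.com"),
	)

	_ = http.ListenAndServe(":8080", handler)
}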
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
new file mode 100644
index 000000000..56b24b982
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelhttp provides an http.Handler and functions that are intended
+// to be used to add tracing by wrapping existing handlers (with Handler) and
+// routes (with WithRouteTag).
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 000000000..937f9b4e7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,238 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/felixge/httpsnoop"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// middleware is an http middleware which wraps the next handler in a span.
+type middleware struct {
+ operation string
+ server string
+
+ tracer trace.Tracer
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ readEvent bool
+ writeEvent bool
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ publicEndpoint bool
+ publicEndpointFn func(*http.Request) bool
+ metricAttributesFn func(*http.Request) []attribute.KeyValue
+
+ semconv semconv.HTTPServer
+}
+
+func defaultHandlerFormatter(operation string, _ *http.Request) string {
+ return operation
+}
+
+// NewHandler wraps the passed handler in a span named after the operation and
+// enriches it with metrics.
+func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
+ return NewMiddleware(operation, opts...)(handler)
+}
+
+// NewMiddleware returns a tracing and metrics instrumentation middleware.
+// The handler returned by the middleware wraps a handler
+// in a span named after the operation and enriches it with metrics.
+func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
+ h := middleware{
+ operation: operation,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
+ WithSpanNameFormatter(defaultHandlerFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ h.configure(c)
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.serveHTTP(w, r, next)
+ })
+ }
+}
+
+func (h *middleware) configure(c *config) {
+ h.tracer = c.Tracer
+ h.propagators = c.Propagators
+ h.spanStartOptions = c.SpanStartOptions
+ h.readEvent = c.ReadEvent
+ h.writeEvent = c.WriteEvent
+ h.filters = c.Filters
+ h.spanNameFormatter = c.SpanNameFormatter
+ h.publicEndpoint = c.PublicEndpoint
+ h.publicEndpointFn = c.PublicEndpointFn
+ h.server = c.ServerName
+ h.semconv = semconv.NewHTTPServer(c.Meter)
+ h.metricAttributesFn = c.MetricAttributesFn
+}
+
+// serveHTTP sets up tracing and calls the given next http.Handler with the span
+// context injected into the request context.
+func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
+ requestStartTime := time.Now()
+ for _, f := range h.filters {
+ if !f(r) {
+ // Simply pass through to the handler if a filter rejects the request
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
+ opts := []trace.SpanStartOption{
+ trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r, semconv.RequestTraceAttrsOpts{})...),
+ }
+
+ opts = append(opts, h.spanStartOptions...)
+ if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
+ opts = append(opts, trace.WithNewRoot())
+ // Linking incoming span context if any for public endpoint.
+ if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
+ opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
+ }
+ }
+
+ tracer := h.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ if startTime := StartTimeFromContext(ctx); !startTime.IsZero() {
+ opts = append(opts, trace.WithTimestamp(startTime))
+ requestStartTime = startTime
+ }
+
+ ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
+ defer span.End()
+
+ readRecordFunc := func(int64) {}
+ if h.readEvent {
+ readRecordFunc = func(n int64) {
+ span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
+ }
+ }
+
+ // If the request body is nil or http.NoBody, don't replace it: callers may
+ // compare the body against those identity values, and substituting the
+ // wrapper would break such checks in an unforeseeable way.
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc)
+ if r.Body != nil && r.Body != http.NoBody {
+ r.Body = bw
+ }
+
+ writeRecordFunc := func(int64) {}
+ if h.writeEvent {
+ writeRecordFunc = func(n int64) {
+ span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
+ }
+ }
+
+ rww := request.NewRespWriterWrapper(w, writeRecordFunc)
+
+ // Wrap w to use our ResponseWriter methods while also exposing
+ // other interfaces that w may implement (http.CloseNotifier,
+ // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
+
+ w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
+ return rww.Header
+ },
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return rww.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return rww.WriteHeader
+ },
+ Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc {
+ return rww.Flush
+ },
+ })
+
+ labeler, found := LabelerFromContext(ctx)
+ if !found {
+ ctx = ContextWithLabeler(ctx, labeler)
+ }
+
+ r = r.WithContext(ctx)
+ next.ServeHTTP(w, r)
+
+ if r.Pattern != "" {
+ span.SetName(h.spanNameFormatter(h.operation, r))
+ }
+
+ statusCode := rww.StatusCode()
+ bytesWritten := rww.BytesWritten()
+ span.SetStatus(h.semconv.Status(statusCode))
+ span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
+ StatusCode: statusCode,
+ ReadBytes: bw.BytesRead(),
+ ReadError: bw.Error(),
+ WriteBytes: bytesWritten,
+ WriteError: rww.Error(),
+ })...)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ metricAttributes := semconv.MetricAttributes{
+ Req: r,
+ StatusCode: statusCode,
+ AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...),
+ }
+
+ h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
+ ServerName: h.server,
+ ResponseSize: bytesWritten,
+ MetricAttributes: metricAttributes,
+ MetricData: semconv.MetricData{
+ RequestSize: bw.BytesRead(),
+ ElapsedTime: elapsedTime,
+ },
+ })
+}
+
+func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
+ var attributeForRequest []attribute.KeyValue
+ if h.metricAttributesFn != nil {
+ attributeForRequest = h.metricAttributesFn(r)
+ }
+ return attributeForRequest
+}
+
+// WithRouteTag annotates spans and metrics with the provided route name
+// using the HTTP route attribute.
+func WithRouteTag(route string, h http.Handler) http.Handler {
+ attr := semconv.NewHTTPServer(nil).Route(route)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ span := trace.SpanFromContext(r.Context())
+ span.SetAttributes(attr)
+
+ labeler, _ := LabelerFromContext(r.Context())
+ labeler.Add(attr)
+
+ h.ServeHTTP(w, r)
+ })
+}
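WithRouteTag pairs naturally with a mux, tagging the span and metrics of each matched route. A fragment sketch (routes are illustrative; usersHandler and ordersHandler are hypothetical handlers):

mux := http.NewServeMux()
mux.Handle("/users/", otelhttp.WithRouteTag("/users/{id}", usersHandler))
mux.Handle("/orders/", otelhttp.WithRouteTag("/orders/{id}", ordersHandler))

// Wrap the whole mux so every request gets a server span and metrics.
_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "server"))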
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
new file mode 100644
index 000000000..d032aa841
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
@@ -0,0 +1,80 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/request/body_wrapper.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package request provides types and functionality to handle HTTP request
+// handling.
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+ "io"
+ "sync"
+)
+
+var _ io.ReadCloser = &BodyWrapper{}
+
+// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type BodyWrapper struct {
+ io.ReadCloser
+ OnRead func(n int64) // must not be nil
+
+ mu sync.Mutex
+ read int64
+ err error
+}
+
+// NewBodyWrapper creates a new BodyWrapper.
+//
+// The onRead attribute is a callback that will be called every time the data
+// is read, with the number of bytes being read.
+func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
+ return &BodyWrapper{
+ ReadCloser: body,
+ OnRead: onRead,
+ }
+}
+
+// Read reads the data from the io.ReadCloser, and stores the number of bytes
+// read and the error.
+func (w *BodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+
+ w.updateReadData(n1, err)
+ w.OnRead(n1)
+ return n, err
+}
+
+func (w *BodyWrapper) updateReadData(n int64, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.read += n
+ if err != nil {
+ w.err = err
+ }
+}
+
+// Close closes the io.ReadCloser.
+func (w *BodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+// BytesRead returns the number of bytes read up to this point.
+func (w *BodyWrapper) BytesRead() int64 {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ return w.read
+}
+
+// Error returns the last error.
+func (w *BodyWrapper) Error() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ return w.err
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
new file mode 100644
index 000000000..9e00dd2fc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+// Generate request package:
+//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper.go.tmpl "--data={}" --out=body_wrapper.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper_test.go.tmpl "--data={}" --out=body_wrapper_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper.go.tmpl "--data={}" --out=resp_writer_wrapper.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper_test.go.tmpl "--data={}" --out=resp_writer_wrapper_test.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
new file mode 100644
index 000000000..ca2e4c14c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -0,0 +1,122 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/request/resp_writer_wrapper.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+ "net/http"
+ "sync"
+)
+
+var _ http.ResponseWriter = &RespWriterWrapper{}
+
+// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
+// that may be useful when using it in real life situations.
+type RespWriterWrapper struct {
+ http.ResponseWriter
+ OnWrite func(n int64) // must not be nil
+
+ mu sync.RWMutex
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+// NewRespWriterWrapper creates a new RespWriterWrapper.
+//
+// The onWrite attribute is a callback that will be called every time the data
+// is written, with the number of bytes that were written.
+func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
+ return &RespWriterWrapper{
+ ResponseWriter: w,
+ OnWrite: onWrite,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
+}
+
+// Write writes the byte slice into the [ResponseWriter], and tracks the
+// number of bytes written and the last error.
+func (w *RespWriterWrapper) Write(p []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if !w.wroteHeader {
+ w.writeHeader(http.StatusOK)
+ }
+
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.OnWrite(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists the initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Blocking consecutive calls to WriteHeader would alter the expected behavior
+// and would suppress the net/http warning logs that help developers notice
+// incorrect handler implementations.
+func (w *RespWriterWrapper) WriteHeader(statusCode int) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.writeHeader(statusCode)
+}
+
+// writeHeader persists the status code for span attribution, and propagates
+// the call to the underlying ResponseWriter.
+// It does not acquire a lock, and therefore assumes that locking is handled
+// by the caller.
+func (w *RespWriterWrapper) writeHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
+
+// Flush implements [http.Flusher].
+func (w *RespWriterWrapper) Flush() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if !w.wroteHeader {
+ w.writeHeader(http.StatusOK)
+ }
+
+ if f, ok := w.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ }
+}
+
+// BytesWritten returns the number of bytes written.
+func (w *RespWriterWrapper) BytesWritten() int64 {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.written
+}
+
+// StatusCode returns the HTTP status code that was sent.
+func (w *RespWriterWrapper) StatusCode() int {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.statusCode
+}
+
+// Error returns the last error.
+func (w *RespWriterWrapper) Error() error {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.err
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
new file mode 100644
index 000000000..7edc8d10f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -0,0 +1,343 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/env.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/semconv/v1.34.0/httpconv"
+)
+
+// OTelSemConvStabilityOptIn is an environment variable that can be set to
+// "http/dup" to keep emitting the old HTTP semantic conventions alongside the new ones.
+const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN"
+
+type ResponseTelemetry struct {
+ StatusCode int
+ ReadBytes int64
+ ReadError error
+ WriteBytes int64
+ WriteError error
+}
+
+type HTTPServer struct {
+ duplicate bool
+
+ // Old metrics
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ serverLatencyMeasure metric.Float64Histogram
+
+ // New metrics
+ requestBodySizeHistogram httpconv.ServerRequestBodySize
+ responseBodySizeHistogram httpconv.ServerResponseBodySize
+ requestDurationHistogram httpconv.ServerRequestDuration
+}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue {
+ attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
+ if s.duplicate {
+ return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs)
+ }
+ return attrs
+}
+
+func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
+ if s.duplicate {
+ return []attribute.KeyValue{
+ OldHTTPServer{}.NetworkTransportAttr(network),
+ CurrentHTTPServer{}.NetworkTransportAttr(network),
+ }
+ }
+ return []attribute.KeyValue{
+ CurrentHTTPServer{}.NetworkTransportAttr(network),
+ }
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp)
+ if s.duplicate {
+ return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs)
+ }
+ return attrs
+}
+
+// Route returns the attribute for the route.
+func (s HTTPServer) Route(route string) attribute.KeyValue {
+ return CurrentHTTPServer{}.Route(route)
+}
+
+// Status returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (s HTTPServer) Status(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+type ServerMetricData struct {
+ ServerName string
+ ResponseSize int64
+
+ MetricData
+ MetricAttributes
+}
+
+type MetricAttributes struct {
+ Req *http.Request
+ StatusCode int
+ AdditionalAttributes []attribute.KeyValue
+}
+
+type MetricData struct {
+ RequestSize int64
+
+ // The request duration, in milliseconds
+ ElapsedTime float64
+}
+
+var (
+ metricAddOptionPool = &sync.Pool{
+ New: func() interface{} {
+ return &[]metric.AddOption{}
+ },
+ }
+
+ metricRecordOptionPool = &sync.Pool{
+ New: func() interface{} {
+ return &[]metric.RecordOption{}
+ },
+ }
+)
+
+func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
+ attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
+ *recordOpts = append(*recordOpts, o)
+ s.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...)
+ s.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...)
+ s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o)
+ *recordOpts = (*recordOpts)[:0]
+ metricRecordOptionPool.Put(recordOpts)
+
+ if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil {
+ attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ addOpts := metricAddOptionPool.Get().(*[]metric.AddOption)
+ *addOpts = append(*addOpts, o)
+ s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...)
+ s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...)
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
+ *addOpts = (*addOpts)[:0]
+ metricAddOptionPool.Put(addOpts)
+ }
+}
+
+// hasOptIn returns true if the comma-separated version string contains the
+// exact optIn value.
+func hasOptIn(version, optIn string) bool {
+ for _, v := range strings.Split(version, ",") {
+ if strings.TrimSpace(v) == optIn {
+ return true
+ }
+ }
+ return false
+}
+
+func NewHTTPServer(meter metric.Meter) HTTPServer {
+ env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
+ duplicate := hasOptIn(env, "http/dup")
+ server := HTTPServer{
+ duplicate: duplicate,
+ }
+
+ var err error
+ server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter)
+ handleErr(err)
+
+ server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter)
+ handleErr(err)
+
+ server.requestDurationHistogram, err = httpconv.NewServerRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(
+ 0.005, 0.01, 0.025, 0.05, 0.075, 0.1,
+ 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10,
+ ),
+ )
+ handleErr(err)
+
+ if duplicate {
+ server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter)
+ }
+ return server
+}
+
+type HTTPClient struct {
+ duplicate bool
+
+ // old metrics
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ latencyMeasure metric.Float64Histogram
+
+ // new metrics
+ requestBodySize httpconv.ClientRequestBodySize
+ requestDuration httpconv.ClientRequestDuration
+}
+
+func NewHTTPClient(meter metric.Meter) HTTPClient {
+ env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
+ duplicate := hasOptIn(env, "http/dup")
+ client := HTTPClient{
+ duplicate: duplicate,
+ }
+
+ var err error
+ client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter)
+ handleErr(err)
+
+ client.requestDuration, err = httpconv.NewClientRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
+ )
+ handleErr(err)
+
+ if duplicate {
+ client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter)
+ }
+
+ return client
+}
+
+// RequestTraceAttrs returns attributes for an HTTP request made by a client.
+func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+ attrs := CurrentHTTPClient{}.RequestTraceAttrs(req)
+ if c.duplicate {
+ return OldHTTPClient{}.RequestTraceAttrs(req, attrs)
+ }
+ return attrs
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
+func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp)
+ if c.duplicate {
+ return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs)
+ }
+ return attrs
+}
+
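+// Status returns a span status code and message for an HTTP status code value
+// received by a client.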
+func (c HTTPClient) Status(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 400 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
+ return CurrentHTTPClient{}.ErrorType(err)
+}
+
+type MetricOpts struct {
+ measurement metric.MeasurementOption
+ addOptions metric.AddOption
+}
+
+func (o MetricOpts) MeasurementOption() metric.MeasurementOption {
+ return o.measurement
+}
+
+func (o MetricOpts) AddOptions() metric.AddOption {
+ return o.addOptions
+}
+
+func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts {
+ opts := map[string]MetricOpts{}
+
+ attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
+ set := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ opts["new"] = MetricOpts{
+ measurement: set,
+ addOptions: set,
+ }
+
+ if c.duplicate {
+ attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
+ set := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ opts["old"] = MetricOpts{
+ measurement: set,
+ addOptions: set,
+ }
+ }
+
+ return opts
+}
+
+func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) {
+ s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
+ s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
+
+ if s.duplicate {
+ s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions())
+ s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption())
+ }
+}
+
+func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) {
+ if s.responseBytesCounter == nil {
+		// Nil when a zero-value HTTPClient{} is used instead of NewHTTPClient(),
+		// or when the old duplicate metrics were not enabled via "http/dup".
+ return
+ }
+
+ s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions())
+}
+
+func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue {
+ attrs := CurrentHTTPClient{}.TraceAttributes(host)
+ if s.duplicate {
+ return OldHTTPClient{}.TraceAttributes(host, attrs)
+ }
+
+ return attrs
+}
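Tying the client helpers together, a hedged sketch of the intended per-request sequence (the MetricAttributes and MetricData field names are taken from their usage above; the surrounding request/response variables are assumptions):

```go
// Sketch: compute attribute options once, then record all client metrics.
opts := client.MetricOptions(MetricAttributes{
	Req:        req,
	StatusCode: resp.StatusCode,
})
client.RecordMetrics(ctx, MetricData{
	RequestSize: requestBytes,
	ElapsedTime: elapsedMillis, // milliseconds; divided by 1000 for the seconds-based metric
}, opts)
client.RecordResponseSize(ctx, responseBytes, opts)
```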
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
new file mode 100644
index 000000000..b4036dd90
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+// Generate semconv package:
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/common_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=common_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
new file mode 100644
index 000000000..9766f3ac8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -0,0 +1,517 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv provides OpenTelemetry semantic convention types and
+// functionality.
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "slices"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+type RequestTraceAttrsOpts struct {
+ // If set, this is used as value for the "http.client_ip" attribute.
+ HTTPClientIP string
+}
+
+type CurrentHTTPServer struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue {
+ count := 3 // ServerAddress, Method, Scheme
+
+ var host string
+ var p int
+ if server == "" {
+ host, p = SplitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = SplitHostPort(server)
+ if p < 0 {
+ _, p = SplitHostPort(req.Host)
+ }
+ }
+
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ count++
+ }
+
+ method, methodOriginal := n.method(req.Method)
+ if methodOriginal != (attribute.KeyValue{}) {
+ count++
+ }
+
+ scheme := n.scheme(req.TLS != nil)
+
+ peer, peerPort := SplitHostPort(req.RemoteAddr)
+ if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+		// file-path that would be interpreted with a sock family.
+ count++
+ if peerPort > 0 {
+ count++
+ }
+ }
+
+ useragent := req.UserAgent()
+ if useragent != "" {
+ count++
+ }
+
+ // For client IP, use, in order:
+ // 1. The value passed in the options
+ // 2. The value in the X-Forwarded-For header
+ // 3. The peer address
+ clientIP := opts.HTTPClientIP
+ if clientIP == "" {
+ clientIP = serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP == "" {
+ clientIP = peer
+ }
+ }
+ if clientIP != "" {
+ count++
+ }
+
+ if req.URL != nil && req.URL.Path != "" {
+ count++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ count++
+ }
+ if protoVersion != "" {
+ count++
+ }
+
+ route := httpRoute(req.Pattern)
+ if route != "" {
+ count++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, count)
+ attrs = append(attrs,
+ semconvNew.ServerAddress(host),
+ method,
+ scheme,
+ )
+
+ if hostPort > 0 {
+ attrs = append(attrs, semconvNew.ServerPort(hostPort))
+ }
+ if methodOriginal != (attribute.KeyValue{}) {
+ attrs = append(attrs, methodOriginal)
+ }
+
+	if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+		// file-path that would be interpreted with a sock family.
+ attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, semconvNew.UserAgentOriginal(useragent))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, semconvNew.ClientAddress(clientIP))
+ }
+
+ if req.URL != nil && req.URL.Path != "" {
+ attrs = append(attrs, semconvNew.URLPath(req.URL.Path))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ if route != "" {
+ attrs = append(attrs, n.Route(route))
+ }
+
+ return attrs
+}
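As a concrete illustration (values assumed), a plain-HTTP GET matched by the routing pattern "GET /items/{id}" would produce attributes along these lines:

```go
attrs := CurrentHTTPServer{}.RequestTraceAttrs("", req, RequestTraceAttrsOpts{})
// For GET http://example.com/items/42 over HTTP/1.1, attrs would include:
// server.address=example.com, http.request.method=GET, url.scheme=http,
// url.path=/items/42, network.protocol.version=1.1, http.route=/items/{id},
// plus network.peer.*, user_agent.original and client.address when present.
```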
+
+func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ return semconvNew.NetworkTransportTCP
+ case "udp", "udp4", "udp6":
+ return semconvNew.NetworkTransportUDP
+ case "unix", "unixgram", "unixpacket":
+ return semconvNew.NetworkTransportUnix
+ default:
+ return semconvNew.NetworkTransportPipe
+ }
+}
+
+func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+ if method == "" {
+ return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+ }
+ if attr, ok := methodLookup[method]; ok {
+ return attr, attribute.KeyValue{}
+ }
+
+ orig := semconvNew.HTTPRequestMethodOriginal(method)
+ if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+ return attr, orig
+ }
+ return semconvNew.HTTPRequestMethodGet, orig
+}
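The normalization keeps the canonical method as the primary attribute and preserves non-canonical casing in the original-method attribute, for example:

```go
// method("GET") -> (http.request.method=GET, no original attribute)
// method("get") -> (http.request.method=GET, http.request.method_original=get)
// method("FOO") -> (http.request.method=GET, http.request.method_original=FOO)
```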
+
+func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return semconvNew.URLScheme("https")
+ }
+ return semconvNew.URLScheme("http")
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP
+// response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will
+// be omitted.
+func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ var count int
+
+ if resp.ReadBytes > 0 {
+ count++
+ }
+ if resp.WriteBytes > 0 {
+ count++
+ }
+ if resp.StatusCode > 0 {
+ count++
+ }
+
+ attributes := make([]attribute.KeyValue, 0, count)
+
+ if resp.ReadBytes > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)),
+ )
+ }
+ if resp.WriteBytes > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)),
+ )
+ }
+ if resp.StatusCode > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPResponseStatusCode(resp.StatusCode),
+ )
+ }
+
+ return attributes
+}
+
+// Route returns the attribute for the route.
+func (n CurrentHTTPServer) Route(route string) attribute.KeyValue {
+ return semconvNew.HTTPRoute(route)
+}
+
+func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ num := len(additionalAttributes) + 3
+ var host string
+ var p int
+ if server == "" {
+ host, p = SplitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = SplitHostPort(server)
+ if p < 0 {
+ _, p = SplitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ num++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ num++
+ }
+ if protoVersion != "" {
+ num++
+ }
+
+ if statusCode > 0 {
+ num++
+ }
+
+ attributes := slices.Grow(additionalAttributes, num)
+ attributes = append(attributes,
+ semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
+ n.scheme(req.TLS != nil),
+ semconvNew.ServerAddress(host))
+
+ if hostPort > 0 {
+ attributes = append(attributes, semconvNew.ServerPort(hostPort))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
+ }
+ return attributes
+}
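Because the additional attributes are grown and appended to in place, the returned slice starts with the caller's attributes and may share their backing array. A small usage sketch (the extra attribute and req are hypothetical):

```go
extra := []attribute.KeyValue{attribute.String("tenant", "acme")} // hypothetical attribute
attrs := CurrentHTTPServer{}.MetricAttributes("", req, 200, extra)
// attrs == [tenant=acme, http.request.method=..., url.scheme=..., server.address=..., ...]
```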
+
+type CurrentHTTPClient struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	/*
+	The following attributes are returned:
+	- http.request.method
+	- http.request.method.original
+	- url.full
+	- server.address
+	- server.port
+	- network.protocol.name
+	- network.protocol.version
+	*/
+	numOfAttributes := 3 // URL, server address, and method.
+
+ var urlHost string
+ if req.URL != nil {
+ urlHost = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
+ requestHost, requestPort = SplitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if eligiblePort > 0 {
+ numOfAttributes++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ numOfAttributes++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ numOfAttributes++
+ }
+ if protoVersion != "" {
+ numOfAttributes++
+ }
+
+ method, originalMethod := n.method(req.Method)
+ if originalMethod != (attribute.KeyValue{}) {
+ numOfAttributes++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, numOfAttributes)
+
+ attrs = append(attrs, method)
+ if originalMethod != (attribute.KeyValue{}) {
+ attrs = append(attrs, originalMethod)
+ }
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, semconvNew.URLFull(u))
+
+ attrs = append(attrs, semconvNew.ServerAddress(requestHost))
+ if eligiblePort > 0 {
+ attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ return attrs
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
+func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ /*
+	The following attributes are returned:
+ - http.response.status_code
+ - error.type
+ */
+ var count int
+ if resp.StatusCode > 0 {
+ count++
+ }
+
+ if isErrorStatusCode(resp.StatusCode) {
+ count++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, count)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
+ }
+
+ if isErrorStatusCode(resp.StatusCode) {
+ errorType := strconv.Itoa(resp.StatusCode)
+ attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
+ }
+ return attrs
+}
+
+func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue {
+ t := reflect.TypeOf(err)
+ var value string
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ value = t.String()
+ } else {
+ value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+ }
+
+ if value == "" {
+ return semconvNew.ErrorTypeOther
+ }
+
+ return semconvNew.ErrorTypeKey.String(value)
+}
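For example, named error types report their package-qualified name, while pointer and builtin types (whose reflect Name and PkgPath are empty) fall back to reflect's string form:

```go
// ErrorType(context.DeadlineExceeded) -> error.type="context.deadlineExceededError"
// ErrorType(&net.OpError{})           -> error.type="*net.OpError"
```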
+
+func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+ if method == "" {
+ return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+ }
+ if attr, ok := methodLookup[method]; ok {
+ return attr, attribute.KeyValue{}
+ }
+
+ orig := semconvNew.HTTPRequestMethodOriginal(method)
+ if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+ return attr, orig
+ }
+ return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ num := len(additionalAttributes) + 2
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{h, req.Header.Get("Host")} {
+ requestHost, requestPort = SplitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if port > 0 {
+ num++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ num++
+ }
+ if protoVersion != "" {
+ num++
+ }
+
+ if statusCode > 0 {
+ num++
+ }
+
+ attributes := slices.Grow(additionalAttributes, num)
+ attributes = append(attributes,
+ semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
+ semconvNew.ServerAddress(requestHost),
+ n.scheme(req),
+ )
+
+ if port > 0 {
+ attributes = append(attributes, semconvNew.ServerPort(port))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
+ }
+ return attributes
+}
+
+// TraceAttributes returns attributes for httptrace.
+func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+ semconvNew.ServerAddress(host),
+ }
+}
+
+func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue {
+ if req.URL != nil && req.URL.Scheme != "" {
+ return semconvNew.URLScheme(req.URL.Scheme)
+ }
+ if req.TLS != nil {
+ return semconvNew.URLScheme("https")
+ }
+ return semconvNew.URLScheme("http")
+}
+
+func isErrorStatusCode(code int) bool {
+ return code >= 400 || code < 100
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
new file mode 100644
index 000000000..e4ebf8581
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -0,0 +1,127 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/util.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+// SplitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func SplitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndexByte(hostport, ']')
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndexByte(hostport[addrEnd:], ':'); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndexByte(hostport, ':'); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+	return host, int(p) // nolint: gosec // Bit size checked to be 16 above.
+}
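A few representative inputs:

```go
// SplitHostPort("example.com:8080") -> ("example.com", 8080)
// SplitHostPort("[::1]:443")        -> ("::1", 443)
// SplitHostPort("example.com")      -> ("example.com", -1)
// SplitHostPort(":8080")            -> ("", 8080)
```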
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.IndexByte(xForwardedFor, ','); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func httpRoute(pattern string) string {
+ if idx := strings.IndexByte(pattern, '/'); idx >= 0 {
+ return pattern[idx:]
+ }
+ return ""
+}
+
+func netProtocol(proto string) (name string, version string) {
+ name, version, _ = strings.Cut(proto, "/")
+ switch name {
+ case "HTTP":
+ name = "http"
+ case "QUIC":
+ name = "quic"
+ case "SPDY":
+ name = "spdy"
+ default:
+ name = strings.ToLower(name)
+ }
+ return name, version
+}
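For example:

```go
// netProtocol("HTTP/1.1") -> ("http", "1.1")
// netProtocol("HTTP/2.0") -> ("http", "2.0")
// netProtocol("QUIC/1")   -> ("quic", "1")
```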
+
+var methodLookup = map[string]attribute.KeyValue{
+ http.MethodConnect: semconvNew.HTTPRequestMethodConnect,
+ http.MethodDelete: semconvNew.HTTPRequestMethodDelete,
+ http.MethodGet: semconvNew.HTTPRequestMethodGet,
+ http.MethodHead: semconvNew.HTTPRequestMethodHead,
+ http.MethodOptions: semconvNew.HTTPRequestMethodOptions,
+ http.MethodPatch: semconvNew.HTTPRequestMethodPatch,
+ http.MethodPost: semconvNew.HTTPRequestMethodPost,
+ http.MethodPut: semconvNew.HTTPRequestMethodPut,
+ http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
+}
+
+func handleErr(err error) {
+ if err != nil {
+ otel.Handle(err)
+ }
+}
+
+func standardizeHTTPMethod(method string) string {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return method
+}
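Anything outside the nine known methods is collapsed to "_OTHER", keeping metric cardinality bounded:

```go
// standardizeHTTPMethod("get")      -> "GET"
// standardizeHTTPMethod("PROPFIND") -> "_OTHER"
```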
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
new file mode 100644
index 000000000..ba7fccf1e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
@@ -0,0 +1,273 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/v1.20.0.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "slices"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+type OldHTTPServer struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
+ return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs)
+}
+
+func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
+ return semconvutil.NetTransport(network)
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue {
+ if resp.ReadBytes > 0 {
+ attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
+ }
+ if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
+ }
+ if resp.WriteBytes > 0 {
+ attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
+ }
+ if resp.StatusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
+ }
+ if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
+ }
+
+ return attributes
+}
+
+// Route returns the attribute for the route.
+func (o OldHTTPServer) Route(route string) attribute.KeyValue {
+ return semconv.HTTPRoute(route)
+}
+
+// HTTPStatusCode returns the attribute for the HTTP status code.
+// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
+func HTTPStatusCode(status int) attribute.KeyValue {
+ return semconv.HTTPStatusCode(status)
+}
+
+// Server HTTP metrics.
+const (
+ serverRequestSize = "http.server.request.size" // Incoming request bytes total
+ serverResponseSize = "http.server.response.size" // Incoming response bytes total
+ serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
+)
+
+func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+ }
+ var err error
+ requestBytesCounter, err := meter.Int64Counter(
+ serverRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ responseBytesCounter, err := meter.Int64Counter(
+ serverResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ serverLatencyMeasure, err := meter.Float64Histogram(
+ serverDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of inbound HTTP requests."),
+ )
+ handleErr(err)
+
+ return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
+}
+
+func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ n := len(additionalAttributes) + 3
+ var host string
+ var p int
+ if server == "" {
+ host, p = SplitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = SplitHostPort(server)
+ if p < 0 {
+ _, p = SplitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ if statusCode > 0 {
+ n++
+ }
+
+ attributes := slices.Grow(additionalAttributes, n)
+ attributes = append(attributes,
+ semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
+ o.scheme(req.TLS != nil),
+ semconv.NetHostName(host))
+
+ if hostPort > 0 {
+ attributes = append(attributes, semconv.NetHostPort(hostPort))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconv.NetProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+ }
+ return attributes
+}
+
+func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return semconv.HTTPSchemeHTTPS
+ }
+ return semconv.HTTPSchemeHTTP
+}
+
+type OldHTTPClient struct{}
+
+func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
+ return semconvutil.HTTPClientRequest(req, attrs)
+}
+
+func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
+ return semconvutil.HTTPClientResponse(resp, attrs)
+}
+
+func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ http.status_code int
+ net.peer.name string
+ net.peer.port int
+ */
+
+ n := 2 // method, peer name.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{h, req.Header.Get("Host")} {
+ requestHost, requestPort = SplitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if port > 0 {
+ n++
+ }
+
+ if statusCode > 0 {
+ n++
+ }
+
+ attributes := slices.Grow(additionalAttributes, n)
+ attributes = append(attributes,
+ semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
+ semconv.NetPeerName(requestHost),
+ )
+
+ if port > 0 {
+ attributes = append(attributes, semconv.NetPeerPort(port))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+ }
+ return attributes
+}
+
+// Client HTTP metrics.
+const (
+	clientRequestSize  = "http.client.request.size"  // Outgoing request bytes total
+	clientResponseSize = "http.client.response.size" // Incoming response bytes total
+	clientDuration     = "http.client.duration"      // Outgoing end to end duration, milliseconds
+)
+
+func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+ }
+ requestBytesCounter, err := meter.Int64Counter(
+ clientRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ responseBytesCounter, err := meter.Int64Counter(
+ clientResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ latencyMeasure, err := meter.Float64Histogram(
+ clientDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of outbound HTTP requests."),
+ )
+ handleErr(err)
+
+ return requestBytesCounter, responseBytesCounter, latencyMeasure
+}
+
+// TraceAttributes returns attributes for httptrace.
+func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue {
+ return append(attrs, semconv.NetHostName(host))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
new file mode 100644
index 000000000..7aa5f99e8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+// Generate semconvutil package:
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
new file mode 100644
index 000000000..b99735479
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
@@ -0,0 +1,594 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconvutil provides OpenTelemetry semantic convention utilities.
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "fmt"
+ "net/http"
+ "slices"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+type HTTPServerRequestOptions struct {
+ // If set, this is used as value for the "http.client_ip" attribute.
+ HTTPClientIP string
+}
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event,
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+//	HTTPClientResponse(resp, ClientRequest(resp.Request))
+func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
+ return hc.ClientResponse(resp, attrs)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
+ return hc.ClientRequest(req, attrs)
+}
+
+// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the
+// related values are defined in req: "net.peer.port".
+func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequestMetrics(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+ return hc.ClientStatus(code)
+}
+
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if
+// their related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip".
+func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
+ return hc.ServerRequest(server, req, opts, attrs)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if they related
+// values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+ return hc.ServerStatus(code)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+ NetConv *netConv
+
+ HTTPClientIPKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPResponseContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ UserAgentOriginalKey attribute.Key
+}
+
+var hc = &httpConv{
+ NetConv: nc,
+
+ HTTPClientIPKey: semconv.HTTPClientIPKey,
+ HTTPMethodKey: semconv.HTTPMethodKey,
+ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
+ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+ HTTPRouteKey: semconv.HTTPRouteKey,
+ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
+ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
+ HTTPTargetKey: semconv.HTTPTargetKey,
+ HTTPURLKey: semconv.HTTPURLKey,
+ UserAgentOriginalKey: semconv.UserAgentOriginalKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event,
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// ClientResponse(resp, ClientRequest(resp.Request))
+func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.status_code int
+ http.response_content_length int
+ */
+ var n int
+ if resp.StatusCode > 0 {
+ n++
+ }
+ if resp.ContentLength > 0 {
+ n++
+ }
+ if n == 0 {
+ return attrs
+ }
+
+ attrs = slices.Grow(attrs, n)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+ }
+ if resp.ContentLength > 0 {
+ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+ }
+ return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length", "user_agent.original".
+func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ user_agent.original string
+ http.url string
+ net.peer.name string
+ net.peer.port int
+ http.request_content_length int
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response. See ClientResponse.
+ http.response_content_length This requires the response. See ClientResponse.
+ net.sock.family This requires the socket used.
+ net.sock.peer.addr This requires the socket used.
+ net.sock.peer.name This requires the socket used.
+ net.sock.peer.port This requires the socket used.
+ http.resend_count This is something outside of a single request.
+	   net.protocol.name The value in the Request is ignored; the Go client always uses "http".
+	   net.protocol.version The value in the Request is ignored; the Go client always uses 1.1 or 2.0.
+ */
+	n := 3 // URL, peer name, and method.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ if req.ContentLength > 0 {
+ n++
+ }
+
+ attrs = slices.Grow(attrs, n)
+ attrs = append(attrs, c.method(req.Method))
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, c.HTTPURLKey.String(u))
+
+ attrs = append(attrs, c.NetConv.PeerName(peer))
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if l := req.ContentLength; l > 0 {
+ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+ }
+
+ return attrs
+}
+
+// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the related values
+// are defined in req: "net.peer.port".
+func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ net.peer.name string
+ net.peer.port int
+ */
+
+ n := 2 // method, peer name.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
+
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if they
+// related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip",
+// "net.protocol.name", "net.protocol.version".
+func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ http.scheme string
+ net.host.name string
+ net.host.port int
+ net.sock.peer.addr string
+ net.sock.peer.port int
+ user_agent.original string
+ http.client_ip string
+ net.protocol.name string Note: not set if the value is "http".
+ net.protocol.version string
+ http.target string Note: doesn't include the query parameter.
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response.
+ http.request_content_length This requires the len() of body, which can mutate it.
+ http.response_content_length This requires the response.
+ http.route This is not available.
+ net.sock.peer.name This would require a DNS lookup.
+ net.sock.host.addr The request doesn't have access to the underlying socket.
+ net.sock.host.port The request doesn't have access to the underlying socket.
+
+ */
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ peer, peerPort := splitHostPort(req.RemoteAddr)
+ if peer != "" {
+ n++
+ if peerPort > 0 {
+ n++
+ }
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+
+ // For client IP, use, in order:
+ // 1. The value passed in the options
+ // 2. The value in the X-Forwarded-For header
+ // 3. The peer address
+ clientIP := opts.HTTPClientIP
+ if clientIP == "" {
+ clientIP = serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP == "" {
+ clientIP = peer
+ }
+ }
+ if clientIP != "" {
+ n++
+ }
+
+ var target string
+ if req.URL != nil {
+ target = req.URL.Path
+ if target != "" {
+ n++
+ }
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs = slices.Grow(attrs, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+
+ if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+		// file-path that would be interpreted with a sock family.
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+ }
+
+ if target != "" {
+ attrs = append(attrs, c.HTTPTargetKey.String(target))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if they related
+// values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.scheme string
+	http.method string
+	net.host.name string
+	net.host.port int
+	net.protocol.name string Note: not set if the value is "http".
+	net.protocol.version string
+	*/
+
+ n := 3 // Method, scheme, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.methodMetric(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+ if protoName != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+func (c *httpConv) method(method string) attribute.KeyValue {
+ if method == "" {
+ return c.HTTPMethodKey.String(http.MethodGet)
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) methodMetric(method string) attribute.KeyValue {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return c.HTTPSchemeHTTPS
+ }
+ return c.HTTPSchemeHTTP
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+// Return the request host and port from the first non-empty source.
+func firstHostPort(source ...string) (host string, port int) {
+ for _, hostport := range source {
+ host, port = splitHostPort(hostport)
+ if host != "" || port > 0 {
+ break
+ }
+ }
+ return
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 400 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
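The client/server asymmetry is deliberate: a 4xx status marks a client span as an error but leaves a server span unset, since 4xx responses are routine, correct server behavior:

```go
// ClientStatus(404) -> (codes.Error, "")
// ServerStatus(404) -> (codes.Unset, "")
// ServerStatus(503) -> (codes.Error, "")
// ServerStatus(42)  -> (codes.Error, "Invalid HTTP status code 42")
```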
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 000000000..df97255e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,214 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+ return nc.Transport(network)
+}
+
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type netConv struct {
+ NetHostNameKey attribute.Key
+ NetHostPortKey attribute.Key
+ NetPeerNameKey attribute.Key
+ NetPeerPortKey attribute.Key
+ NetProtocolName attribute.Key
+ NetProtocolVersion attribute.Key
+ NetSockFamilyKey attribute.Key
+ NetSockPeerAddrKey attribute.Key
+ NetSockPeerPortKey attribute.Key
+ NetSockHostAddrKey attribute.Key
+ NetSockHostPortKey attribute.Key
+ NetTransportOther attribute.KeyValue
+ NetTransportTCP attribute.KeyValue
+ NetTransportUDP attribute.KeyValue
+ NetTransportInProc attribute.KeyValue
+}
+
+var nc = &netConv{
+ NetHostNameKey: semconv.NetHostNameKey,
+ NetHostPortKey: semconv.NetHostPortKey,
+ NetPeerNameKey: semconv.NetPeerNameKey,
+ NetPeerPortKey: semconv.NetPeerPortKey,
+ NetProtocolName: semconv.NetProtocolNameKey,
+ NetProtocolVersion: semconv.NetProtocolVersionKey,
+ NetSockFamilyKey: semconv.NetSockFamilyKey,
+ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+ NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+ NetSockHostAddrKey: semconv.NetSockHostAddrKey,
+ NetSockHostPortKey: semconv.NetSockHostPortKey,
+ NetTransportOther: semconv.NetTransportOther,
+ NetTransportTCP: semconv.NetTransportTCP,
+ NetTransportUDP: semconv.NetTransportUDP,
+ NetTransportInProc: semconv.NetTransportInProc,
+}
+
+func (c *netConv) Transport(network string) attribute.KeyValue {
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ return c.NetTransportTCP
+ case "udp", "udp4", "udp6":
+ return c.NetTransportUDP
+ case "unix", "unixgram", "unixpacket":
+ return c.NetTransportInProc
+ default:
+ // "ip:*", "ip4:*", and "ip6:*" all are considered other.
+ return c.NetTransportOther
+ }
+}
+
+// Host returns attributes for a network host address.
+func (c *netConv) Host(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.HostName(h))
+ if p > 0 {
+ attrs = append(attrs, c.HostPort(p))
+ }
+ return attrs
+}
+
+func (c *netConv) HostName(name string) attribute.KeyValue {
+ return c.NetHostNameKey.String(name)
+}
+
+func (c *netConv) HostPort(port int) attribute.KeyValue {
+ return c.NetHostPortKey.Int(port)
+}
+
+func family(network, address string) string {
+ switch network {
+ case "unix", "unixgram", "unixpacket":
+ return "unix"
+ default:
+ if ip := net.ParseIP(address); ip != nil {
+ if ip.To4() == nil {
+ return "inet6"
+ }
+ return "inet"
+ }
+ }
+ return ""
+}
+
+// Peer returns attributes for a network peer address.
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.PeerName(h))
+ if p > 0 {
+ attrs = append(attrs, c.PeerPort(p))
+ }
+ return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+ return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+ return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+ return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+ return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
+}
+
+func netProtocol(proto string) (name string, version string) {
+ name, version, _ = strings.Cut(proto, "/")
+ switch name {
+ case "HTTP":
+ name = "http"
+ case "QUIC":
+ name = "quic"
+ case "SPDY":
+ name = "spdy"
+ default:
+ name = strings.ToLower(name)
+ }
+ return name, version
+}
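To make the splitHostPort contract above concrete, here is a sketch of the documented input/output pairs as an in-package test (hypothetical; it assumes the standard testing package, since the function is unexported):

    func TestSplitHostPortContract(t *testing.T) {
        cases := []struct {
            in   string
            host string
            port int
        }{
            {"example.com", "example.com", -1}, // no port -> port is -1
            {"example.com:8080", "example.com", 8080},
            {"[::1]:443", "::1", 443}, // bracketed IPv6 host
            {":80", "", 80},           // host omitted
            {"[::1", "", -1},          // unparsable -> zero values
        }
        for _, c := range cases {
            h, p := splitHostPort(c.in)
            if h != c.host || p != c.port {
                t.Errorf("splitHostPort(%q) = %q, %d; want %q, %d", c.in, h, p, c.host, c.port)
            }
        }
    }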
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 000000000..d62ce44b0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+ mu sync.Mutex
+ attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.attributes = append(l.attributes, ls...)
+}
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ ret := make([]attribute.KeyValue, len(l.attributes))
+ copy(ret, l.attributes)
+ return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+// ContextWithLabeler returns a new context with the provided Labeler instance.
+// Attributes added to the specified labeler will be injected into metrics
+// emitted by the instrumentation. Only one Labeler can be injected into the
+// context. Injecting it multiple times will override the previous calls.
+func ContextWithLabeler(parent context.Context, l *Labeler) context.Context {
+ return context.WithValue(parent, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context, a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+ l, ok := ctx.Value(labelerContextKey).(*Labeler)
+ if !ok {
+ l = &Labeler{}
+ }
+ return l, ok
+}
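Typical Labeler usage (a minimal sketch; the operation name "hello" and the tenant header are illustrative assumptions): inside a handler wrapped by otelhttp.NewHandler, fetch the Labeler from the request context and add attributes that the instrumentation attaches to its metrics:

    package main

    import (
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
        "go.opentelemetry.io/otel/attribute"
    )

    func main() {
        handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // Attributes added here end up on the metrics recorded by the
            // surrounding otelhttp instrumentation for this request.
            labeler, _ := otelhttp.LabelerFromContext(r.Context())
            labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant")))
            w.WriteHeader(http.StatusOK)
        })
        _ = http.ListenAndServe(":8080", otelhttp.NewHandler(handler, "hello"))
    }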
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
new file mode 100644
index 000000000..9476ef01b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "time"
+)
+
+type startTimeContextKeyType int
+
+const startTimeContextKey startTimeContextKeyType = 0
+
+// ContextWithStartTime returns a new context with the provided start time. The
+// start time will be used for metrics and traces emitted by the
+// instrumentation. Only one start time can be injected into the context.
+// Injecting it multiple times will override the previous calls.
+func ContextWithStartTime(parent context.Context, start time.Time) context.Context {
+ return context.WithValue(parent, startTimeContextKey, start)
+}
+
+// StartTimeFromContext retrieves a time.Time from the provided context if one
+// is available. If no start time was found in the provided context, the zero
+// time.Time is returned.
+func StartTimeFromContext(ctx context.Context) time.Time {
+ t, _ := ctx.Value(startTimeContextKey).(time.Time)
+ return t
+}
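A sketch of how a caller might use this to backdate instrumentation to the moment a request was actually accepted (the queueing scenario and the 50ms offset are illustrative assumptions):

    package main

    import (
        "context"
        "net/http"
        "time"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func main() {
        // Suppose the request was accepted earlier, e.g. pulled from a queue;
        // backdate the instrumentation's notion of "start" to that moment.
        acceptedAt := time.Now().Add(-50 * time.Millisecond)
        ctx := otelhttp.ContextWithStartTime(context.Background(), acceptedAt)

        req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", nil)
        _ = req // hand off to an instrumented client or handler chain
    }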
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 000000000..e4c02a429
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,279 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/http/httptrace"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/propagation"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span and enriches them with metrics.
+type Transport struct {
+ rt http.RoundTripper
+
+ tracer trace.Tracer
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+ metricAttributesFn func(*http.Request) []attribute.KeyValue
+
+ semconv semconv.HTTPClient
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span, injects the span context into the outbound request headers,
+// and records metrics for the request.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+ if base == nil {
+ base = http.DefaultTransport
+ }
+
+ t := Transport{
+ rt: base,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+ WithSpanNameFormatter(defaultTransportFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ t.applyConfig(c)
+
+ return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+ t.tracer = c.Tracer
+ t.propagators = c.Propagators
+ t.spanStartOptions = c.SpanStartOptions
+ t.filters = c.Filters
+ t.spanNameFormatter = c.SpanNameFormatter
+ t.clientTrace = c.ClientTrace
+ t.semconv = semconv.NewHTTPClient(c.Meter)
+ t.metricAttributesFn = c.MetricAttributesFn
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+ return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+ requestStartTime := time.Now()
+ for _, f := range t.filters {
+ if !f(r) {
+			// Simply pass through to the base RoundTripper if a filter rejects the request.
+ return t.rt.RoundTrip(r)
+ }
+ }
+
+ tracer := t.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+ ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+ if t.clientTrace != nil {
+ ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+ }
+
+ labeler, found := LabelerFromContext(ctx)
+ if !found {
+ ctx = ContextWithLabeler(ctx, labeler)
+ }
+
+ r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
+
+	// If the request body is nil or http.NoBody, leave it untouched: replacing
+	// it would change its identity in ways callers can observe, since code
+	// elsewhere compares the body against those sentinel values.
+ bw := request.NewBodyWrapper(r.Body, func(int64) {})
+ if r.Body != nil && r.Body != http.NoBody {
+ r.Body = bw
+ }
+
+ span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
+ t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+ res, err := t.rt.RoundTrip(r)
+
+	// Defer the metrics recording so it runs whether or not the round trip errored.
+ defer func() {
+ metricAttributes := semconv.MetricAttributes{
+ Req: r,
+ AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
+ }
+
+ if err == nil {
+ metricAttributes.StatusCode = res.StatusCode
+ }
+
+ metricOpts := t.semconv.MetricOptions(metricAttributes)
+
+ metricData := semconv.MetricData{
+ RequestSize: bw.BytesRead(),
+ }
+
+ if err == nil {
+			// Record response bytes via a callback invoked as the client reads the HTTP response.
+ readRecordFunc := func(n int64) {
+ t.semconv.RecordResponseSize(ctx, n, metricOpts)
+ }
+
+ res.Body = newWrappedBody(span, readRecordFunc, res.Body)
+ }
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ metricData.ElapsedTime = elapsedTime
+
+ t.semconv.RecordMetrics(ctx, metricData, metricOpts)
+ }()
+
+ if err != nil {
+		// Set the error type attribute if the error is one of the predefined
+		// error types; otherwise, record it as an exception.
+ if errType := t.semconv.ErrorType(err); errType.Valid() {
+ span.SetAttributes(errType)
+ } else {
+ span.RecordError(err)
+ }
+
+ span.SetStatus(codes.Error, err.Error())
+ span.End()
+
+ return res, err
+ }
+
+ // traces
+ span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
+ span.SetStatus(t.semconv.Status(res.StatusCode))
+
+ return res, nil
+}
+
+func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
+ var attributeForRequest []attribute.KeyValue
+ if t.metricAttributesFn != nil {
+ attributeForRequest = t.metricAttributesFn(r)
+ }
+ return attributeForRequest
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser {
+	// Successful protocol switch responses have a body that implements
+	// io.ReadWriteCloser. Ensure this interface type continues
+ // to be satisfied if that is the case.
+ if _, ok := body.(io.ReadWriteCloser); ok {
+ return &wrappedBody{span: span, record: record, body: body}
+ }
+
+	// Hide the io.Writer implementation so the returned value only exposes
+	// the io.ReadCloser interface.
+ return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+ span trace.Span
+ recorded atomic.Bool
+ record func(n int64)
+ body io.ReadCloser
+ read atomic.Int64
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+ // This will not panic given the guard in newWrappedBody.
+ n, err := wb.body.(io.Writer).Write(p)
+ if err != nil {
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+ n, err := wb.body.Read(b)
+ // Record the number of bytes read
+ wb.read.Add(int64(n))
+
+ switch err {
+ case nil:
+ // nothing to do here but fall through to the return
+ case io.EOF:
+ wb.recordBytesRead()
+ wb.span.End()
+ default:
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once.
+func (wb *wrappedBody) recordBytesRead() {
+ // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that
+ // two goroutines are racing to call this method, the number of bytes read will no longer increase. Using
+ // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish
+ // calling wb.record(wb.read.Load()).
+ if wb.recorded.CompareAndSwap(false, true) {
+ // Record the total number of bytes read
+ wb.record(wb.read.Load())
+ }
+}
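In isolation, the record-once pattern described in the comment above looks like the following standalone sketch (not part of the vendored code):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type onceRecorder struct {
        done  atomic.Bool
        total atomic.Int64
    }

    // record invokes f with the accumulated total exactly once; racing callers
    // return immediately instead of blocking the way sync.Once.Do would.
    func (o *onceRecorder) record(f func(int64)) {
        if o.done.CompareAndSwap(false, true) {
            f(o.total.Load())
        }
    }

    func main() {
        var r onceRecorder
        r.total.Add(42)
        r.record(func(n int64) { fmt.Println("recorded", n) }) // prints once
        r.record(func(n int64) { fmt.Println("recorded", n) }) // no-op
    }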
+
+func (wb *wrappedBody) Close() error {
+ wb.recordBytesRead()
+ wb.span.End()
+ if wb.body != nil {
+ return wb.body.Close()
+ }
+ return nil
+}
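The usual way to wire this Transport into an application (a sketch using only the exported API shown above): wrap the default transport in an http.Client so every outbound request gets a client span, propagation headers, and metrics:

    package main

    import (
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func main() {
        client := &http.Client{
            Transport: otelhttp.NewTransport(http.DefaultTransport),
        }
        resp, err := client.Get("http://example.com")
        if err == nil {
            // Closing the body ends the client span (see RoundTrip above).
            resp.Body.Close()
        }
    }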
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 000000000..2fe5a1361
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+ return "0.62.0"
+ // This string is updated by the pre_release.sh script during release
+}
diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml
new file mode 100644
index 000000000..128d61a22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml
@@ -0,0 +1,3 @@
+exemptions:
+ - check: artifacthub_badge
+ reason: "Artifact Hub doesn't support Go packages"
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
new file mode 100644
index 000000000..6bf3abc41
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -0,0 +1,9 @@
+ot
+fo
+te
+collison
+consequentially
+ans
+nam
+valu
+thirdparty
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
new file mode 100644
index 000000000..e2cb3ea94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -0,0 +1,10 @@
+# https://github.com/codespell-project/codespell
+[codespell]
+builtin = clear,rare,informal
+check-filenames =
+check-hidden =
+ignore-words = .codespellignore
+interactive = 1
+skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools
+uri-ignore-words-list = *
+write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 000000000..314766e91
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 000000000..749e8e881
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,15 @@
+.DS_Store
+Thumbs.db
+
+.cache/
+.tools/
+venv/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+go.work
+go.work.sum
+
+gen/
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 000000000..5f69cc027
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,250 @@
+version: "2"
+run:
+ issues-exit-code: 1
+ tests: true
+linters:
+ default: none
+ enable:
+ - asasalint
+ - bodyclose
+ - depguard
+ - errcheck
+ - errorlint
+ - godot
+ - gosec
+ - govet
+ - ineffassign
+ - misspell
+ - perfsprint
+ - revive
+ - staticcheck
+ - testifylint
+ - unconvert
+ - unparam
+ - unused
+ - usestdlibvars
+ - usetesting
+ settings:
+ depguard:
+ rules:
+ auto/sdk:
+ files:
+ - '!internal/global/trace.go'
+ - ~internal/global/trace_test.go
+ deny:
+ - pkg: go.opentelemetry.io/auto/sdk
+ desc: Do not use SDK from automatic instrumentation.
+ non-tests:
+ files:
+ - '!$test'
+ - '!**/*test/*.go'
+ - '!**/internal/matchers/*.go'
+ deny:
+ - pkg: testing
+ - pkg: github.com/stretchr/testify
+ - pkg: crypto/md5
+ - pkg: crypto/sha1
+ - pkg: crypto/**/pkix
+ otel-internal:
+ files:
+ - '**/sdk/*.go'
+ - '**/sdk/**/*.go'
+ - '**/exporters/*.go'
+ - '**/exporters/**/*.go'
+ - '**/schema/*.go'
+ - '**/schema/**/*.go'
+ - '**/metric/*.go'
+ - '**/metric/**/*.go'
+ - '**/bridge/*.go'
+ - '**/bridge/**/*.go'
+ - '**/trace/*.go'
+ - '**/trace/**/*.go'
+ - '**/log/*.go'
+ - '**/log/**/*.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/internal$
+ desc: Do not use cross-module internal packages.
+ - pkg: go.opentelemetry.io/otel/internal/internaltest
+ desc: Do not use cross-module internal packages.
+ otlp-internal:
+ files:
+ - '!**/exporters/otlp/internal/**/*.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/exporters/otlp/internal
+ desc: Do not use cross-module internal packages.
+ otlpmetric-internal:
+ files:
+ - '!**/exporters/otlp/otlpmetric/internal/*.go'
+ - '!**/exporters/otlp/otlpmetric/internal/**/*.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal
+ desc: Do not use cross-module internal packages.
+ otlptrace-internal:
+ files:
+ - '!**/exporters/otlp/otlptrace/*.go'
+ - '!**/exporters/otlp/otlptrace/internal/**.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
+ desc: Do not use cross-module internal packages.
+ godot:
+ exclude:
+ # Exclude links.
+ - '^ *\[[^]]+\]:'
+ # Exclude sentence fragments for lists.
+ - ^[ ]*[-•]
+ # Exclude sentences prefixing a list.
+ - :$
+ misspell:
+ locale: US
+ ignore-rules:
+ - cancelled
+ perfsprint:
+ int-conversion: true
+ err-error: true
+ errorf: true
+ sprintf1: true
+ strconcat: true
+ revive:
+ confidence: 0.01
+ rules:
+ - name: blank-imports
+ - name: bool-literal-in-expr
+ - name: constant-logical-expr
+ - name: context-as-argument
+ arguments:
+ - allowTypesBefore: '*testing.T'
+ disabled: true
+ - name: context-keys-type
+ - name: deep-exit
+ - name: defer
+ arguments:
+ - - call-chain
+ - loop
+ - name: dot-imports
+ - name: duplicated-imports
+ - name: early-return
+ arguments:
+ - preserveScope
+ - name: empty-block
+ - name: empty-lines
+ - name: error-naming
+ - name: error-return
+ - name: error-strings
+ - name: errorf
+ - name: exported
+ arguments:
+ - sayRepetitiveInsteadOfStutters
+ - name: flag-parameter
+ - name: identical-branches
+ - name: if-return
+ - name: import-shadowing
+ - name: increment-decrement
+ - name: indent-error-flow
+ arguments:
+ - preserveScope
+ - name: package-comments
+ - name: range
+ - name: range-val-in-closure
+ - name: range-val-address
+ - name: redefines-builtin-id
+ - name: string-format
+ arguments:
+ - - panic
+ - /^[^\n]*$/
+ - must not contain line breaks
+ - name: struct-tag
+ - name: superfluous-else
+ arguments:
+ - preserveScope
+ - name: time-equal
+ - name: unconditional-recursion
+ - name: unexported-return
+ - name: unhandled-error
+ arguments:
+ - fmt.Fprint
+ - fmt.Fprintf
+ - fmt.Fprintln
+ - fmt.Print
+ - fmt.Printf
+ - fmt.Println
+ - name: unnecessary-stmt
+ - name: useless-break
+ - name: var-declaration
+ - name: var-naming
+ arguments:
+ - ["ID"] # AllowList
+ - ["Otel", "Aws", "Gcp"] # DenyList
+ - name: waitgroup-by-value
+ testifylint:
+ enable-all: true
+ disable:
+ - float-compare
+ - go-require
+ - require-error
+ exclusions:
+ generated: lax
+ presets:
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - revive
+ path: schema/v.*/types/.*
+ text: avoid meaningless package names
+ # TODO: Having appropriate comments for exported objects helps development,
+ # even for objects in internal packages. Appropriate comments for all
+ # exported objects should be added and this exclusion removed.
+ - linters:
+ - revive
+ path: .*internal/.*
+ text: exported (method|function|type|const) (.+) should have comment or be unexported
+ # Yes, they are, but it's okay in a test.
+ - linters:
+ - revive
+ path: _test\.go
+ text: exported func.*returns unexported type.*which can be annoying to use
+ # Example test functions should be treated like main.
+ - linters:
+ - revive
+ path: example.*_test\.go
+ text: calls to (.+) only in main[(][)] or init[(][)] functions
+ # It's okay to not run gosec and perfsprint in a test.
+ - linters:
+ - gosec
+ - perfsprint
+ path: _test\.go
+ # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+ # as we commonly use it in tests and examples.
+ - linters:
+ - gosec
+ text: 'G404:'
+ # Ignoring gosec G402: TLS MinVersion too low
+ # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
+ - linters:
+ - gosec
+ text: 'G402: TLS MinVersion too low.'
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofumpt
+ - goimports
+ - golines
+ settings:
+ goimports:
+ local-prefixes:
+ - go.opentelemetry.io
+ golines:
+ max-len: 120
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
new file mode 100644
index 000000000..40d62fa2e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -0,0 +1,6 @@
+http://localhost
+http://jaeger-collector
+https://github.com/open-telemetry/opentelemetry-go/milestone/
+https://github.com/open-telemetry/opentelemetry-go/projects
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
new file mode 100644
index 000000000..3202496c3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
@@ -0,0 +1,29 @@
+# Default state for all rules
+default: true
+
+# ul-style
+MD004: false
+
+# hard-tabs
+MD010: false
+
+# line-length
+MD013: false
+
+# no-duplicate-header
+MD024:
+ siblings_only: true
+
+# single-title
+MD025: false
+
+# ol-prefix
+MD029:
+ style: ordered
+
+# no-inline-html
+MD033: false
+
+# fenced-code-language
+MD040: false
+
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
new file mode 100644
index 000000000..4acc75701
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -0,0 +1,3454 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+
+
+
+## [1.37.0/0.59.0/0.13.0] 2025-06-25
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.33.0` package.
+ The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0`. (#6799)
+- The `go.opentelemetry.io/otel/semconv/v1.34.0` package.
+ The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812)
+- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825)
+- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when the unit doesn't match one of the pre-defined values in the unit suffix map. (#6839)
+
+### Changed
+
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836)
+- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864)
+- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898)
+
+### Fixed
+
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710)
+- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822)
+- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914)
+
+### Removed
+
+- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770)
+
+## [0.12.2] 2025-05-22
+
+### Fixed
+
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804)
+
+## [0.12.1] 2025-05-21
+
+### Fixed
+
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800)
+
+## [1.36.0/0.58.0/0.12.0] 2025-05-20
+
+### Added
+
+- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421)
+- The `go.opentelemetry.io/otel/semconv/v1.31.0` package.
+ The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479)
+- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507)
+- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751)
+- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752)
+- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688)
+- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973)
+- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973)
+- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973)
+- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662)
+- The `go.opentelemetry.io/otel/semconv/v1.32.0` package.
+ The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`. (#6782)
+- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794)
+- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796)
+
+### Removed
+
+- Drop support for [Go 1.22]. (#6381, #6418)
+- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494)
+- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492)
+- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507)
+- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662)
+
+### Changed
+
+- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`.
+ This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores.
+ This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433)
+- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455)
+- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465)
+- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466)
+- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507)
+- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when the exporter cannot accept more. (#6569, #6641)
+
+### Deprecated
+
+- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449)
+
+### Fixed
+
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392)
+- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456)
+- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472)
+
+## [1.35.0/0.57.0/0.11.0] 2025-03-05
+
+This release is the last to support [Go 1.22].
+The next release will require at least [Go 1.23].
+
+### Added
+
+- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187)
+- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210)
+- The `go.opentelemetry.io/otel/semconv/v1.28.0` package.
+ The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`. (#6236)
+- The `go.opentelemetry.io/otel/semconv/v1.30.0` package.
+ The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`. (#6240)
+- Document the pitfalls of using `Resource` as a comparable type.
+ `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272)
+- Support [Go 1.24]. (#6304)
+- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`.
+ It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`.
+ Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317)
+
+### Changed
+
+- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`.
+ This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores.
+ This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198)
+
+### Fixed
+
+- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368)
+- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369)
+
+## [1.34.0/0.56.0/0.10.0] 2025-01-17
+
+### Changed
+
+- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
+
+### Fixed
+
+- Relax minimum Go version to 1.22.0 in various modules. (#6073)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
+
+## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
+
+### Added
+
+- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994)
+- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`.
+ This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`.
+ Users can use it to avoid performing computationally expensive operations when recording measurements.
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016)
+
+### Changed
+
+- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package.
+ See that package for more information. (#5920)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929)
+- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011)
+- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009)
+
+### Fixed
+
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954)
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954)
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954)
+- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995)
+- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997)
+- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032)
+
+## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
+- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
+- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
+- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861)
+- The `go.opentelemetry.io/otel/semconv/v1.27.0` package.
+ The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894)
+- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903)
+- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932)
+
+### Changed
+
+- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
+- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
+- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
+- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
+- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
+
+### Fixed
+
+- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
+- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
+
+### Removed
+
+- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
+
+## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
+- Add `WithExportBufferSize` option to log batch processor. (#5877)
+
+### Changed
+
+- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. (#5778)
+- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
+- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
+- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
+- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
+- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
+- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
+
+### Deprecated
+
+- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
+
+### Fixed
+
+- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
+- Fix duplication of log records in the case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
+- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
+- Fix delegation for global meter providers, and a panic when calling `otel.SetMeterProvider`. (#5827)
+- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
+
+## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
+
+### Added
+
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
+- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
+
+### Fixed
+
+- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
+- Fix panic on instruments creation when setting meter provider. (#5758)
+- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
+
+### Removed
+
+- Drop support for [Go 1.21]. (#5736, #5740, #5800)
+
+## [1.29.0/0.51.0/0.5.0] 2024-08-23
+
+This release is the last to support [Go 1.21].
+The next release will require at least [Go 1.22].
+
+### Added
+
+- Add macOS ARM64 platform to the compatibility testing suite. (#5577)
+- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
+- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
+ This new module contains an OTLP exporter that transmits log telemetry using gRPC.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
+- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
+- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
+- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
+- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
+ This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
+ It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
+- Support [Go 1.23]. (#5720)
+
+### Changed
+
+- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
+- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
+- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
+- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
+- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
+- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
+ See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
+- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+
+### Fixed
+
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
+- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
+- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
+- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
+- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
+- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5705)
+- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5705)
+
+### Removed
+
+- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+
+## [1.28.0/0.50.0/0.4.0] 2024-07-02
+
+### Added
+
+- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`.
+ This method is used to check if an `Instrument` instance is a zero-value. (#5431)
+- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468)
+- The `go.opentelemetry.io/otel/semconv/v1.26.0` package.
+ The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476)
+- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499)
+- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530)
+
+### Changed
+
+- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490)
+ - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes.
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490)
+- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493)
+- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497)
+- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
+- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`; see the sketch after this list. (#5304)
+ - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+ - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
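+
+As an illustration of the new synchronous gauge, the following is a minimal sketch, not an excerpt from the release: the `Int64Gauge` constructor name reflects the API as we understand it, while the scope and instrument names are hypothetical.
+
+```go
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel"
+)
+
+func main() {
+    meter := otel.Meter("example/gauge") // hypothetical scope name
+    // The synchronous gauge is created directly from the Meter.
+    gauge, err := meter.Int64Gauge("room.temperature")
+    if err != nil {
+        panic(err)
+    }
+    // Record the current value directly; no observable callback is needed.
+    gauge.Record(context.Background(), 23)
+}
+```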
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when the `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter no longer prints the `AttributeValueLengthLimit` and `AttributeCountLimit` fields; instead it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either a non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+ This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+ This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+  This new module contains an exporter that prints log records to STDOUT.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+ The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses Docker Compose to bring up services instead of Kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+ This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+ This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+  This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+  At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
+- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
+- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
+- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
+
+### Changed
+
+- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
+  Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+  Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated.
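+
+A minimal sketch of exporter construction under this behavior follows; the endpoint is a hypothetical collector address. Because the client starts in idle mode, `New` returns without waiting for a connection, and blocking dial options passed through `WithDialOption` have no effect.
+
+```go
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+)
+
+func main() {
+    ctx := context.Background()
+    // The gRPC client is created lazily (idle mode) with "dns" as the
+    // default resolver; no connection is attempted here.
+    exp, err := otlptracegrpc.New(ctx,
+        otlptracegrpc.WithEndpoint("collector:4317"), // hypothetical address
+        otlptracegrpc.WithInsecure(),
+    )
+    if err != nil {
+        panic(err)
+    }
+    defer exp.Shutdown(ctx)
+}
+```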
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
+
+## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
+
+This release is the last to support [Go 1.20].
+The next release will require at least [Go 1.21].
+
+### Added
+
+- Support [Go 1.22]. (#4890)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
+- The `go.opentelemetry.io/otel/log` module is added.
+ This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
+ This module is in an alpha state, it is subject to breaking changes.
+ See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
+
+### Fixed
+
+- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
+- Fix negative buckets in output of exponential histograms. (#4956)
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+ This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+  Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+  It is up to the user to decide whether to use the returned `Resource`.
+  It may have desired attributes overwritten or include stale semantic conventions. (#4876)
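+
+A minimal sketch of handling the new partial-merge behavior; the second schema URL and the attributes are hypothetical.
+
+```go
+package main
+
+import (
+    "errors"
+    "fmt"
+
+    "go.opentelemetry.io/otel/sdk/resource"
+    semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+func main() {
+    a := resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("svc-a"))
+    b := resource.NewWithAttributes("https://example.com/other-schema", semconv.ServiceVersion("1.0.0"))
+
+    merged, err := resource.Merge(a, b)
+    if errors.Is(err, resource.ErrSchemaURLConflict) {
+        // The merge still produced a usable partial result; it is up to
+        // the caller whether possibly stale conventions are acceptable.
+        fmt.Println("schema conflict, using partial merge:", merged)
+    }
+}
+```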
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+ The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+ The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+ The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+ The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+  If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`); see the sketch after this list. (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
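+
+As referenced above, a minimal sketch of the caller-side guard for canceled contexts; the `requests` counter is a hypothetical instrument.
+
+```go
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel/metric"
+)
+
+// countRequest records only while the context is live. The SDK now
+// records measurements even on a canceled context, so skipping them
+// is the caller's decision.
+func countRequest(ctx context.Context, requests metric.Int64Counter) {
+    if ctx.Err() == nil {
+        requests.Add(ctx, 1)
+    }
+}
+
+func main() {}
+```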
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments; see the sketch after this list. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
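+
+As referenced above, a minimal sketch of `WithExplicitBucketBoundaries`; the instrument name and boundary values are hypothetical.
+
+```go
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/metric"
+)
+
+func main() {
+    meter := otel.Meter("example")
+    // Default bucket boundaries (in seconds) used unless a view overrides them.
+    dur, err := meter.Float64Histogram("request.duration",
+        metric.WithExplicitBucketBoundaries(0.01, 0.1, 0.5, 1, 5),
+    )
+    if err != nil {
+        panic(err)
+    }
+    dur.Record(context.Background(), 0.42)
+}
+```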
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+ Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the `metric.Producer` interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
+
+### Fixed
+
+- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+ The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation; see the sketch after this list. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+ The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
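+
+As referenced above, a minimal sketch of configuring an exponential histogram through a view. It assumes the aggregation type name after the move in #4435, `AggregationBase2ExponentialHistogram`; the instrument name and limits are hypothetical.
+
+```go
+package main
+
+import (
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+    // Route a histogram instrument to the base-2 exponential histogram
+    // aggregation instead of the default explicit-bucket histogram.
+    view := sdkmetric.NewView(
+        sdkmetric.Instrument{Name: "request.latency"},
+        sdkmetric.Stream{Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
+            MaxSize:  160,
+            MaxScale: 20,
+        }},
+    )
+    _ = sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
+}
+```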
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user-provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+  Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+ OpenTelemetry dropped support for Jaeger exporter in July 2023.
+ Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+ or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+ Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+ The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+ The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
+- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+ Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+ This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+ Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+ - The `AddConfig` used to hold configuration for addition measurements
+ - `NewAddConfig` used to create a new `AddConfig`
+ - `AddOption` used to configure an `AddConfig`
+ - The `RecordConfig` used to hold configuration for recorded measurements
+ - `NewRecordConfig` used to create a new `RecordConfig`
+ - `RecordOption` used to configure a `RecordConfig`
+ - The `ObserveConfig` used to hold configuration for observed measurements
+ - `NewObserveConfig` used to create a new `ObserveConfig`
+ - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute Set associated with the measurement; see the sketch after this list. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
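+
+As referenced above, a minimal sketch of a measurement option in use. It is written against today's package layout, where these helpers live in `go.opentelemetry.io/otel/metric` rather than `go.opentelemetry.io/otel/metric/instrument`; the instrument and attribute names are hypothetical.
+
+```go
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/metric"
+)
+
+func main() {
+    meter := otel.Meter("example")
+    counter, err := meter.Int64Counter("jobs.processed")
+    if err != nil {
+        panic(err)
+    }
+    // WithAttributes defines the attribute Set associated with this
+    // single measurement.
+    counter.Add(context.Background(), 1,
+        metric.WithAttributes(attribute.String("queue", "default")),
+    )
+}
+```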
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+ This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+ - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to improve the error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Int64Counter.Add` method now accepts `...AddOption`
+ - The `Float64Counter.Add` method now accepts `...AddOption`
+ - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Int64Histogram.Record` method now accepts `...RecordOption`
+ - The `Float64Histogram.Record` method now accepts `...RecordOption`
+ - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+ - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+ - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+ It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+ Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+ The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+ Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+ - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeNameKey` -> `OTelScopeNameKey`
+ - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+ - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+ - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+ - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+ - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+ - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+ - `OtelStatusCodeError` -> `OTelStatusCodeError`
+ - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeName` -> `OTelScopeName`
+ - `OtelScopeVersion` -> `OTelScopeVersion`
+ - `OtelLibraryName` -> `OTelLibraryName`
+ - `OtelLibraryVersion` -> `OTelLibraryVersion`
+ - `OtelStatusDescription` -> `OTelStatusDescription`
+- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+ See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+ - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+ - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted.
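+
+A minimal sketch of how these variables interact with the periodic reader; the values (milliseconds) and the stdout exporter are illustrative, and the variables are assumed to apply only when the equivalent options are not set in code.
+
+```go
+package main
+
+import (
+    "context"
+    "os"
+
+    "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+    // Read by the periodic reader when WithInterval/WithTimeout are not used.
+    os.Setenv("OTEL_METRIC_EXPORT_INTERVAL", "30000") // milliseconds
+    os.Setenv("OTEL_METRIC_EXPORT_TIMEOUT", "5000")   // milliseconds
+
+    exp, err := stdoutmetric.New()
+    if err != nil {
+        panic(err)
+    }
+    reader := sdkmetric.NewPeriodicReader(exp)
+    defer reader.Shutdown(context.Background())
+}
+```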
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+ This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+ Use the equivalent unit string instead. (#3776)
+ - Use `"1"` instead of `unit.Dimensionless`
+ - Use `"By"` instead of `unit.Bytes`
+ - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+ These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- The `http.target` attribute is no longer added by `ServerRequest` in the following packages. (#3687)
+ - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+ This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+ The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+ - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+ - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+ The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+ The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+ The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+ These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages; a short usage sketch follows this list. (#3575, #3586)
+ - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+ - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+ - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+ - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+ - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+ - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+ - `Float64Counter` replaces the `syncfloat64.Counter`
+ - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+ - `Float64Histogram` replaces the `syncfloat64.Histogram`
+ - `Int64Counter` replaces the `syncint64.Counter`
+ - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+ - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+ This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+ The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
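+
+As a rough illustration of the replacement instruments, the sketch below creates a histogram directly on the `Meter` rather than through the deprecated `syncfloat64` package. The function and instrument names are illustrative, and the exact v0.35.0 signatures are assumed.
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+)
+
+// recordDuration sketches the replacement sync instruments: the
+// histogram is created directly on the Meter. Names are illustrative.
+func recordDuration(ctx context.Context, meter metric.Meter) error {
+	hist, err := meter.Float64Histogram("request.duration")
+	if err != nil {
+		return err
+	}
+	hist.Record(ctx, 0.123) // hypothetical duration in seconds
+	return nil
+}
+```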
+
+### Changed
+
+- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+ - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+ - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+ - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+ This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+ This fixes random sampling when using ID generators like `xray.IDGenerator`, increasing parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+ Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+ - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+ - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+ - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+ - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+ - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+ - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed; a sketch of the new usage follows this list.
+ - The named `Callback` replaces the inline function parameter. (#3564)
+ - `Callback` is required to return an error. (#3576)
+ - `Callback` accepts the added `Observer` parameter.
+ This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+ - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+ This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+ Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
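+
+A minimal sketch of the revised `RegisterCallback` usage described above, assuming the v0.35.0 API; the instrument name and callback body are illustrative.
+
+```go
+package main
+
+import (
+	"context"
+	"runtime"
+
+	"go.opentelemetry.io/otel/metric"
+)
+
+// observeGoroutines sketches the revised registration: the named
+// Callback receives an Observer, must return an error, and the
+// instruments it reports on are passed variadically.
+func observeGoroutines(meter metric.Meter) (metric.Registration, error) {
+	gauge, err := meter.Int64ObservableGauge("process.goroutines")
+	if err != nil {
+		return nil, err
+	}
+	return meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
+		o.ObserveInt64(gauge, int64(runtime.NumGoroutine()))
+		return nil
+	}, gauge)
+}
+```
+
+The returned `Registration` can later be removed with its `Unregister` method.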
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+ Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+ Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+ Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead, as sketched after this list. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Float64Histogram`
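+
+A migration sketch for the removed `InstrumentProvider`s, assuming the corresponding `Meter` creation methods of this release; the instrument name is illustrative.
+
+```go
+package main
+
+import "go.opentelemetry.io/otel/metric"
+
+// migrateObservable sketches the migration: the removed asyncint64
+// InstrumentProvider (previously reached through the Meter's
+// AsyncInt64 method) is replaced by a direct creation method.
+func migrateObservable(meter metric.Meter) error {
+	// Before: ctr, err := meter.AsyncInt64().Counter("jobs.completed")
+	_, err := meter.Int64ObservableCounter("jobs.completed")
+	return err
+}
+```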
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+ This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as an info metric and label in the Prometheus exporter.
+ This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+ - `OTEL_EXPORTER_OTLP_INSECURE`
+ - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+ - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+ - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`; see the sketch after this list. (#3459)
+- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` function allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
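+
+A sketch of registering a view with the `MeterProvider`, assuming the v0.34.0 API; the instrument and stream names are illustrative.
+
+```go
+package main
+
+import sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+// newProvider sketches view registration: the view renames one
+// instrument's output stream and applies to every registered Reader.
+func newProvider(reader sdkmetric.Reader) (*sdkmetric.MeterProvider, error) {
+	view, err := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "latency"},
+		sdkmetric.Stream{Name: "request.latency"},
+	)
+	if err != nil {
+		return nil, err
+	}
+	return sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(reader),
+		sdkmetric.WithView(view),
+	), nil
+}
+```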
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+ Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+ The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+ By default, it will register with the default Prometheus registerer.
+ A non-default registerer can be used by passing the `WithRegisterer` option; a sketch follows this list. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
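+
+A sketch of creating the exporter with a non-default registerer, assuming the v0.33.0 API in which `New` reports registration failures as an error.
+
+```go
+package main
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// newMeterProvider sketches the new registration behavior: the
+// exporter registers itself on creation, here with a custom registry.
+func newMeterProvider() (*sdkmetric.MeterProvider, error) {
+	registry := prometheus.NewRegistry()
+	exporter, err := otelprom.New(otelprom.WithRegisterer(registry))
+	if err != nil {
+		return nil, err
+	}
+	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)), nil
+}
+```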
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+ It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+ This fixes the implementation to be compliant with the W3C specification; see the sketch after this list. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+ Instead the exporter is defined as an "unchecked" collector for Prometheus.
+ This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+ This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
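+
+A sketch of the `baggage.NewMember` decoding fix; the key and value are illustrative.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+// attachUser sketches the fix: NewMember now percent-decodes the
+// value before validating it, per the W3C baggage specification.
+func attachUser(ctx context.Context) (context.Context, error) {
+	member, err := baggage.NewMember("user", "alice%20smith")
+	if err != nil {
+		return ctx, err
+	}
+	bag, err := baggage.New(member)
+	if err != nil {
+		return ctx, err
+	}
+	fmt.Println(bag.Member("user").Value()) // "alice smith"
+	return baggage.ContextWithBaggage(ctx, bag), nil
+}
+```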
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops; a sketch follows this list. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+ This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
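+
+A sketch of the `span.SetStatus` no-op rule, using the status precedence from the OpenTelemetry specification (Unset < Error < Ok).
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// setFinalStatus sketches the new rule: a span status may only be
+// raised, so calls that would lower it are ignored.
+func setFinalStatus(span trace.Span) {
+	span.SetStatus(codes.Ok, "")
+	span.SetStatus(codes.Error, "failed") // no-op: would lower the status
+}
+```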
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` package when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+ Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the sdk. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+ Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18. This removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+ A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+ A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+ Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+ The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+ The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
+of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from the `nonrecording` package to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16 is removed. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+ Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+ Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+ The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+ The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+ The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+ The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+ Consequently, the `Record` type from the same package has also had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+ This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+ Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+ This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+ This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+ Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+ This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+ New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+ High-level changes include:
+
+ - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+ These `InstrumentProvider`s are managed with a `Meter`.
+ - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+ - Asynchronous callbacks can now be registered with a `Meter`.
+
+ Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
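+
+A minimal sketch of the revised API, assuming the v0.28.0 signatures; the meter and instrument names are illustrative.
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/global"
+)
+
+// countRequest sketches the revised API: synchronous int64 instruments
+// come from the SyncInt64 InstrumentProvider managed by the Meter.
+func countRequest(ctx context.Context) error {
+	meter := global.MeterProvider().Meter("example")
+	counter, err := meter.SyncInt64().Counter("requests")
+	if err != nil {
+		return err
+	}
+	counter.Add(ctx, 1)
+	return nil
+}
+```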
+
+### Fixed
+
+- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the Exporters' configuration in the TracerProvider's message. (#2578)
+- Added support to configure the span limits with environment variables.
+ The following environment variables are supported. (#2606, #2637)
+ - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+ - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+ - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+ - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+ - `OTEL_SPAN_LINK_COUNT_LIMIT`
+ - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+ If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+ The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+ The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+ This option replaces the `WithSpanLimits` option.
+ Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+ Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
+ Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
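+
+A sketch of configuring raw span limits, assuming the v1.5.0 API; the chosen length limit is illustrative.
+
+```go
+package main
+
+import sdktrace "go.opentelemetry.io/otel/sdk/trace"
+
+// newTracerProvider sketches the raw limits: start from the defaults,
+// cap attribute value length, and apply the limits as-is (zero
+// disables the limited resource, negative means unlimited).
+func newTracerProvider() *sdktrace.TracerProvider {
+	limits := sdktrace.NewSpanLimits()
+	limits.AttributeValueLengthLimit = 256
+	return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
+}
+```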
+
+### Changed
+
+- Drop oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601)
+- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+ Use `WithRawSpanLimits` instead.
+ That option allows setting unlimited and zero limits; this option does not.
+ This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
+ To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span processor with environment variables; a sketch follows this list.
+ The following environment variables are used. (#2515)
+ - `OTEL_BSP_SCHEDULE_DELAY`
+ - `OTEL_BSP_EXPORT_TIMEOUT`
+ - `OTEL_BSP_MAX_QUEUE_SIZE`
+ - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
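+
+A sketch of configuring the batch span processor through these variables; the values are illustrative, and the variables must be set before the processor is constructed.
+
+```go
+package main
+
+import (
+	"os"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// newBatchProvider sketches env-var configuration: the batch span
+// processor reads the OTEL_BSP_* variables when it is constructed.
+func newBatchProvider(exporter sdktrace.SpanExporter) *sdktrace.TracerProvider {
+	os.Setenv("OTEL_BSP_MAX_QUEUE_SIZE", "4096")
+	os.Setenv("OTEL_BSP_SCHEDULE_DELAY", "2000") // milliseconds
+	return sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
+}
+```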
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+ Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not again when adding them to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+ Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+ This drop order still only applies to attributes with unique keys not already contained in the span.
+ If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+ - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+ - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+ - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project's minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+ This can be used by the SDK and API to provide users with feedback about the internal state.
+ To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
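+
+A sketch of the new status mapping, assuming the signature mirrors `SpanStatusFromHTTPStatusCode`; the `v1.7.0` semconv import is one of several packages that gained the function.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// A 4XX response is an error for a client span but not for a server
+// span, which is the distinction this function encodes.
+func main() {
+	code, _ := semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindServer)
+	fmt.Println(code) // Unset for a server span
+}
+```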
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+ Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+ When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use sync.Map to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporters/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+ - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
+ - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+ - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
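+
+A sketch of reusing an existing gRPC connection, assuming the v1.2.0 exporter signatures.
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+	"google.golang.org/grpc"
+)
+
+// newExporter sketches connection reuse: the exporter rides an
+// existing *grpc.ClientConn instead of dialing its own.
+func newExporter(ctx context.Context, conn *grpc.ClientConn) (*otlpmetric.Exporter, error) {
+	return otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithGRPCConn(conn))
+}
+```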
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+ The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+ The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+ The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+ - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+ - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+ - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+ - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+ - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+ - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+ - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+ - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+ - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer is configured with the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
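+
+A sketch of the map-key fix; the attribute key and values are illustrative.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Slice-valued attributes now compare by value, so two equal slices
+// yield equal KeyValues and can key a map.
+func main() {
+	seen := map[attribute.KeyValue]bool{}
+	seen[attribute.StringSlice("tags", []string{"a", "b"})] = true
+	fmt.Println(seen[attribute.StringSlice("tags", []string{"a", "b"})]) // true
+}
+```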
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
+ Use the typed functions and methods added to the package instead. (#2235)
+ - The `Key.Array` method is removed.
+ - The `Array` function is removed.
+ - The `Any` function is removed.
+ - The `ArrayValue` function is removed.
+ - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+ - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+ Included is an example application that computes Fibonacci numbers. (#2203)
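+
+A sketch combining `ErrorHandlerFunc` and `WithStackTrace`, assuming the RC3 API; the handler body and error are illustrative.
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// recordWithTrace sketches two additions: a plain function serves as
+// the global ErrorHandler, and a recorded error carries a stack
+// trace event.
+func recordWithTrace(span trace.Span) {
+	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+		log.Printf("otel: %v", err)
+	}))
+	span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
+}
+```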
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+ - ValueRecorder becomes Histogram
+ - ValueObserver becomes Gauge
+ - SumObserver becomes CounterObserver
+ - UpDownSumObserver becomes UpDownCounterObserver
+ The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, while pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+ All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+ The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
+ Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+ Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+ The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+ This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+ This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+ For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+ This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+ Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+ The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+ The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+ This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+ - The following status codes are defined as transient errors:
+ | gRPC Status Code | Description |
+ | ---------------- | ----------- |
+ | 1 | Cancelled |
+ | 4 | Deadline Exceeded |
+ | 8 | Resource Exhausted |
+ | 10 | Aborted |
+ | 11 | Out of Range |
+ | 14 | Unavailable |
+ | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+ This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+ It can be used to decode a `TraceState` from a `tracestate` header string value; a sketch follows this list. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+ This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+ Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+ These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
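+ A minimal sketch (the tracer name is illustrative):
+
+ ```go
+ package main
+
+ import (
+     "context"
+
+     "go.opentelemetry.io/otel/trace"
+ )
+
+ // startChild starts a child span using the same trace pipeline that
+ // produced the current span, without threading a TracerProvider through.
+ func startChild(ctx context.Context) (context.Context, trace.Span) {
+     span := trace.SpanFromContext(ctx)
+     tracer := span.TracerProvider().Tracer("example.com/mylib")
+     return tracer.Start(ctx, "child-operation")
+ }
+
+ func main() {
+     ctx, span := startChild(context.Background()) // no-op without an SDK
+     defer span.End()
+     _ = ctx
+ }
+ ```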
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically uses an internal `noopDriver` for any `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when its `ForceFlush()` method is called. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+ This method returns the status of a span using the new `Status` type. (#1874)
+- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+ This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+ This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+ The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
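+ Taken together, the reworked `TraceState` methods operate on plain strings; a minimal sketch (the key and value are illustrative):
+
+ ```go
+ package main
+
+ import (
+     "fmt"
+     "log"
+
+     "go.opentelemetry.io/otel/trace"
+ )
+
+ func main() {
+     var ts trace.TraceState // the zero value is a valid, empty TraceState
+
+     // Insert returns a new TraceState; the receiver is unchanged.
+     ts, err := ts.Insert("vendor", "value")
+     if err != nil {
+         log.Fatal(err)
+     }
+     fmt.Println(ts.Get("vendor")) // "value"
+
+     // Delete now returns only the new TraceState, no error.
+     ts = ts.Delete("vendor")
+     fmt.Println(ts.Len()) // 0
+ }
+ ```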
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated; use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated; use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated; use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to include them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace` package.
+  The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns whether the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated, so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+ The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+ When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+ Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
+ The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900)
+ - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+ Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+ These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if a channel is closed, fixing a Jaeger exporter test panic caused by closing an already closed channel. (#1870, #1973)
+- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
+
+### Security
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
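+ A minimal sketch of the new type and methods:
+
+ ```go
+ package main
+
+ import (
+     "fmt"
+
+     "go.opentelemetry.io/otel/trace"
+ )
+
+ func main() {
+     // Start from empty flags and set the sampled bit.
+     flags := trace.TraceFlags(0).WithSampled(true)
+     fmt.Println(flags.IsSampled())                    // true
+     fmt.Println(flags.WithSampled(false).IsSampled()) // false
+ }
+ ```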
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds a test to check that `BatchSpanProcessor` ignores `OnEnd` and `ForceFlush` calls after `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+ - `process.pid`
+ - `process.executable.name`
+ - `process.executable.path`
+ - `process.command_args`
+ - `process.owner`
+ - `process.runtime.name`
+ - `process.runtime.version`
+ - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
+ - `OTEL_EXPORTER_OTLP_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_HEADERS`
+ - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+ - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+ - `OTEL_EXPORTER_OTLP_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
+- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` now always returns false when the span is not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+ This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+ Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky reconnection handling of the OTLP client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+ Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+ The Span's SpanContext can now self-identify as being remote or not.
+ This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+ The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+ This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+ to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+ It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
+- Modify the Zipkin exporter's default service name to use the default resource's `service.name` instead of an empty string. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The `prometheus.InstallNewPipeline` example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+ This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+ The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+ This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+ To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Setting the error status while recording an error with a `Span` from the `oteltest` package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+ Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+ Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+ If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
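+ A minimal sketch of the replacement pattern:
+
+ ```go
+ package main
+
+ import (
+     "context"
+     "fmt"
+
+     "go.opentelemetry.io/otel/trace"
+ )
+
+ // describe replaces the removed RemoteSpanContextFromContext pattern.
+ func describe(ctx context.Context) {
+     sc := trace.SpanContextFromContext(ctx)
+     if !sc.IsValid() {
+         fmt.Println("no span in context")
+         return
+     }
+     fmt.Println("remote:", sc.IsRemote())
+ }
+
+ func main() {
+     describe(context.Background()) // "no span in context"
+ }
+ ```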
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+ This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+ The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+ To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values duplicate other internal functions and
+  are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+ This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+ If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+ The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+ The type is no longer used by the exporter to configure anything.
+ All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
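+ A minimal usage sketch:
+
+ ```go
+ package main
+
+ import (
+     "context"
+     "log"
+
+     sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ )
+
+ func main() {
+     // Register batchers/exporters as needed; ForceFlush flushes them all.
+     tp := sdktrace.NewTracerProvider()
+     defer tp.Shutdown(context.Background())
+
+     if err := tp.ForceFlush(context.Background()); err != nil {
+         log.Fatal(err)
+     }
+ }
+ ```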
+- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
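+ A minimal sketch of both options together (the ratio and limit values are illustrative, not recommendations):
+
+ ```go
+ package main
+
+ import (
+     sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ )
+
+ func main() {
+     // Sample a quarter of new traces and tighten per-span limits.
+     _ = sdktrace.NewTracerProvider(
+         sdktrace.WithSampler(sdktrace.TraceIDRatioBased(0.25)),
+         sdktrace.WithSpanLimits(sdktrace.SpanLimits{
+             AttributeCountLimit: 64,
+             EventCountLimit:     64,
+             LinkCountLimit:      64,
+         }),
+     )
+ }
+ ```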
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+ - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
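+ A minimal sketch (the hex IDs are illustrative W3C trace-context values):
+
+ ```go
+ package main
+
+ import (
+     "fmt"
+
+     "go.opentelemetry.io/otel/trace"
+ )
+
+ func main() {
+     tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+     sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
+
+     sc := trace.NewSpanContext(trace.SpanContextConfig{
+         TraceID:    tid,
+         SpanID:     sid,
+         TraceFlags: trace.FlagsSampled,
+     })
+     fmt.Println(sc.IsValid()) // true
+ }
+ ```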
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656)
+- Added non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
+
+### Removed
+
+- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+ These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in global trace delegate implementation. (#1686)
+- Reduced excess memory usage by global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
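+ A minimal sketch of adapting an `http.Header` (the header value is an illustrative traceparent):
+
+ ```go
+ package main
+
+ import (
+     "fmt"
+     "net/http"
+
+     "go.opentelemetry.io/otel/propagation"
+ )
+
+ func main() {
+     // Adapt an http.Header to the TextMapCarrier interface.
+     carrier := propagation.HeaderCarrier(http.Header{})
+     carrier.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
+
+     // Keys lists every key the carrier holds (canonicalized by http.Header).
+     fmt.Println(carrier.Keys()) // [Traceparent]
+ }
+ ```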
+- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+ | OS | Go Version | Architecture |
+ | ------- | ---------- | ------------ |
+ | Ubuntu | 1.15 | amd64 |
+ | Ubuntu | 1.14 | amd64 |
+ | Ubuntu | 1.15 | 386 |
+ | Ubuntu | 1.14 | 386 |
+ | MacOS | 1.15 | amd64 |
+ | MacOS | 1.14 | amd64 |
+ | Windows | 1.15 | amd64 |
+ | Windows | 1.14 | amd64 |
+ | Windows | 1.15 | 386 |
+ | Windows | 1.14 | 386 |
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+ `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
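+ For illustration, the same pattern with the batching option, using the `stdouttrace` exporter from a later release listed above:
+
+ ```go
+ package main
+
+ import (
+     "context"
+     "log"
+     "time"
+
+     "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+     sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ )
+
+ func main() {
+     exp, err := stdouttrace.New()
+     if err != nil {
+         log.Fatal(err)
+     }
+     // Pass a time.Duration, not a bare integer, as the batch timeout.
+     tp := sdktrace.NewTracerProvider(
+         sdktrace.WithBatcher(exp, sdktrace.WithBatchTimeout(5*time.Second)),
+     )
+     defer tp.Shutdown(context.Background())
+ }
+ ```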
+- Prevent end-users from implementing some interfaces (#1575)
+
+ ```
+ "otel/exporters/otlp/otlphttp".Option
+ "otel/exporters/stdout".Option
+ "otel/oteltest".Option
+ "otel/trace".TracerOption
+ "otel/trace".SpanOption
+ "otel/trace".EventOption
+ "otel/trace".LifeCycleOption
+ "otel/trace".InstrumentationOption
+ "otel/sdk/resource".Option
+ "otel/sdk/trace".ParentBasedSamplerOption
+ "otel/sdk/trace".ReadOnlySpan
+ "otel/sdk/trace".ReadWriteSpan
+ ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+ This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now explicitly set up to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577)
+- The sequential timing check of timestamps in go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now explicitly set up to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
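+ A minimal sketch of a `SpanProcessor` built on these interfaces; it is written against today's `sdk/trace` method set (`ForceFlush` gained its context parameter in a later release noted above):
+
+ ```go
+ package main
+
+ import (
+     "context"
+     "fmt"
+
+     sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ )
+
+ // nameLogger is a minimal SpanProcessor: it may mutate spans on start via
+ // ReadWriteSpan but can only inspect finished spans via ReadOnlySpan.
+ type nameLogger struct{}
+
+ func (nameLogger) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
+     s.SetName("prefixed/" + s.Name()) // ReadWriteSpan permits updates
+ }
+
+ func (nameLogger) OnEnd(s sdktrace.ReadOnlySpan) {
+     fmt.Println("ended:", s.Name()) // ReadOnlySpan is read-only
+ }
+
+ func (nameLogger) Shutdown(context.Context) error   { return nil }
+ func (nameLogger) ForceFlush(context.Context) error { return nil }
+
+ func main() {
+     tp := sdktrace.NewTracerProvider()
+     tp.RegisterSpanProcessor(nameLogger{})
+ }
+ ```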
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added codeql workflow to GitHub Actions (#1428)
+- Added Gosec workflow to GitHub Actions (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
+- Remove duplicate checkout from GitHub Actions workflow (#1407)
+- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
+- Metric `exact` aggregator includes per-point timestamps (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify endpoint API that related to OTel exporter. (#1401)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+ - `WithExporter()` and `Start()` to configure Push behavior
+ - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+ - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
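+ A sketch of a custom generator against today's `sdk/trace` interface (the method signatures reflect the current API and may differ slightly from this release):
+
+ ```go
+ package main
+
+ import (
+     "context"
+     crand "crypto/rand"
+
+     sdktrace "go.opentelemetry.io/otel/sdk/trace"
+     "go.opentelemetry.io/otel/trace"
+ )
+
+ // cryptoIDGenerator satisfies the sdktrace.IDGenerator interface using
+ // crypto/rand. Production code should also guard against generating
+ // all-zero (invalid) IDs.
+ type cryptoIDGenerator struct{}
+
+ func (cryptoIDGenerator) NewIDs(_ context.Context) (trace.TraceID, trace.SpanID) {
+     var tid trace.TraceID
+     var sid trace.SpanID
+     _, _ = crand.Read(tid[:])
+     _, _ = crand.Read(sid[:])
+     return tid, sid
+ }
+
+ func (cryptoIDGenerator) NewSpanID(_ context.Context, _ trace.TraceID) trace.SpanID {
+     var sid trace.SpanID
+     _, _ = crand.Read(sid[:])
+     return sid
+ }
+
+ func main() {
+     _ = sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(cryptoIDGenerator{}))
+ }
+ ```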
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+ - `ID` has been renamed to `TraceID`.
+ - `IDFromHex` has been renamed to `TraceIDFromHex`.
+ - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+ - supports OTLP v0.6.0 (#1230, #1354)
+ - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+ This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
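+ A minimal sketch of the reworked signatures, using today's package layout where the options live in `go.opentelemetry.io/otel/trace`:
+
+ ```go
+ package main
+
+ import (
+     "context"
+     "errors"
+     "time"
+
+     "go.opentelemetry.io/otel/attribute"
+     "go.opentelemetry.io/otel/trace"
+ )
+
+ func handle(ctx context.Context) {
+     span := trace.SpanFromContext(ctx)
+
+     // AddEvent: a required name plus variadic EventOptions, no context.
+     span.AddEvent("cache miss",
+         trace.WithAttributes(attribute.String("cache.key", "user:42")),
+         trace.WithTimestamp(time.Now()),
+     )
+
+     // RecordError: a required error plus variadic EventOptions, no context.
+     span.RecordError(errors.New("lookup failed"))
+ }
+
+ func main() {
+     handle(context.Background()) // no-op span when no SDK is configured
+ }
+ ```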
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
+ `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000 (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+ It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+ `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
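+ A minimal sketch; the exact JSON representation noted in the comments is an assumption:
+
+ ```go
+ package main
+
+ import (
+     "encoding/json"
+     "fmt"
+
+     "go.opentelemetry.io/otel/codes"
+ )
+
+ func main() {
+     fmt.Println(codes.Error.String()) // Stringer: prints the code's name
+
+     // JSON round-trip; the representation is assumed to be the code's name.
+     b, _ := json.Marshal(codes.Error)
+     var c codes.Code
+     _ = json.Unmarshal(b, &c)
+     fmt.Println(c == codes.Error) // true
+ }
+ ```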
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add a `Shutdown` method to the `sdk/trace` provider that shuts down processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+ They now are `Unset`, `Error`, and `Ok`.
+ They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+ The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+ This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add a reconnecting UDP connection type to the Jaeger exporter.
+  This change adds a new optional implementation of the UDP connection interface used to detect changes to an agent's host DNS record.
+ It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+ This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+  This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` propagators from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+  This removal reflects the OpenTelemetry specification for these propagators and cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+ The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+ recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+ Style Guide to reflect this styling rule. (#1172)
+- Don't consider unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+ `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+ `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+ This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify callback function naming.
+  Rename `*Callback` to `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
+ This interface still supports the export of `SpanData`, but only as a slice.
+  Implementations are also now required to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+ If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+ This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+ Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+ This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+ The functionality this method provided was limited compared to what a user can provide themselves.
+ It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+ These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+  These functions build a new exporter with default SDK options and register the exporter with the `global` package, respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+ This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+ This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+ - `value.Bool` was replaced with `kv.BoolValue`.
+ - `value.Int64` was replaced with `kv.Int64Value`.
+ - `value.Uint64` was replaced with `kv.Uint64Value`.
+ - `value.Float64` was replaced with `kv.Float64Value`.
+ - `value.Int32` was replaced with `kv.Int32Value`.
+ - `value.Uint32` was replaced with `kv.Uint32Value`.
+ - `value.Float32` was replaced with `kv.Float32Value`.
+ - `value.String` was replaced with `kv.StringValue`.
+ - `value.Int` was replaced with `kv.IntValue`.
+ - `value.Uint` was replaced with `kv.UintValue`.
+ - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+ References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+  Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add HTTP content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+ This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+ If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+ Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+ This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+ - `"api/standard".FaaSName` -> `FaaSNameKey`
+ - `"api/standard".FaaSID` -> `FaaSIDKey`
+ - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+ - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
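+
+A minimal sketch of the `InjectEncoding` configuration described above; the `apitrace` package alias and the exact placement of these identifiers are assumptions about the v0.8.0 API layout rather than verified code:
+
+```go
+// Hedged sketch: combine B3Encoding bitmasks so the propagator injects
+// both Single Header and Multiple Header encodings. Identifier names
+// follow the entries above; the package location is an assumption.
+b3 := apitrace.B3{
+	InjectEncoding: apitrace.MultipleHeader | apitrace.SingleHeader,
+}
+_ = b3 // register with your configured propagators
+```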
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+  The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+ If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+ This removes the behavior of changing the debug flag into a set sampling bit.
+  Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+ This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+ This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument.
+ This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+ With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+ All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+ Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling to replace any one-off error handlers with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order of the Histogram and DDSketch `New()` functions consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks. (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows the user to specify a custom HTTP client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method on the `Resource` to return a hashable identifier for the now unique resource. (#613)
+- An `Iter` method on the `Resource` to return an `AttributeIterator` over its attributes. (#613)
+- An `Equal` method on the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+ This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+ This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in the OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+ Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+ The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+ The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+ This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+ The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+ This resulted in all subsequent releases not becoming the default latest.
+ A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+ Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+ Consequentially, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorder the Makefile targets within the `precommit` target so files are generated and the code is built before linting; this produces much clearer compiler errors about syntax problems. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+ This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+ This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Rename the `circle-ci` target to the more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+ Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+ This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+ This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauge, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the OpenTelemetry Go library.
+It contains the API and SDK for trace and meter.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD
+[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
+[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
+[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
+[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0
+[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
+[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
+[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
+[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
+[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
+[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
+[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
+[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
+[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
+[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
+[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
+[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
+[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
+[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
+[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
+[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
+[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
+[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
+[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
+[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
+[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
+[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
+[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
+[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
+[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
+[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
+[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
+[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
+[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
+[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
+[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
+[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
+[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
+[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
+[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
+[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
+[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
+[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
+[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
+[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
+[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
+[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
+[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
+[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
+[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
+[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
+[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
+[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
+[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
+[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
+[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
+[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
+[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
+[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
+[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
+[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
+[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
+[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
+[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
+[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
+[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
+[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
+[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
+[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
+[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
+[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
+[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
+[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
+[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
+[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
+[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
+[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
+[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
+[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
+[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
+[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
+[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
+[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
+[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
+[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
+[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
+[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
+[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+
+
+[Go 1.24]: https://go.dev/doc/go1.24
+[Go 1.23]: https://go.dev/doc/go1.23
+[Go 1.22]: https://go.dev/doc/go1.22
+[Go 1.21]: https://go.dev/doc/go1.21
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
+
+[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
+[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
+[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
+
+[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
new file mode 100644
index 000000000..945a07d2b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -0,0 +1,17 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about membership in OpenTelemetry community:
+# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
+#
+#
+# Learn about CODEOWNERS file format:
+# https://help.github.com/en/articles/about-code-owners
+#
+
+* @MrAlias @XSAM @dashpole @pellared @dmathieu
+
+CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
new file mode 100644
index 000000000..f9ddc281f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -0,0 +1,676 @@
+# Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` to run the tests instead of `go test`.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean` then it means that everything
+is up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print some warning about "build constraints exclude all Go
+files"; just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of a redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This would put the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
+
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+ tag it as `work-in-progress`, or mark it as
+ [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+ This is not enforced through automation, but needs to be validated by the
+ maintainer merging.
+  * At least one of the qualified approvals needs to be from an
+ [Approver]/[Maintainer] affiliated with a different company than the author
+ of the PR.
+ * PRs introducing changes that have already been discussed and consensus
+ reached only need one qualified approval. The discussion and resolution
+ needs to be linked to the PR.
+ * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+ * All PR comments and suggestions are resolved.
+ * All GitHub Pull Request reviews with a status of "Request changes" have
+ been addressed. Another review by the objecting reviewer with a different
+ status can be submitted to clear the original review, or the review can be
+ dismissed by a [Maintainer] when the issues from the original review have
+ been addressed.
+ * Any comments or reviews that cannot be resolved between the PR author and
+ reviewers can be submitted to the community [Approver]s and [Maintainer]s
+ during the weekly SIG meeting. If consensus is reached among the
+ [Approver]s and [Maintainer]s during the SIG meeting the objections to the
+ PR may be dismissed or resolved or the PR closed by a [Maintainer].
+ * Any substantive changes to the PR require existing Approval reviews be
+ cleared unless the approver explicitly states that their approval persists
+ across changes. This includes changes resulting from other feedback.
+ [Approver]s and [Maintainer]s can help in clearing reviews and they should
+ be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+ * To ensure this does not block the PR, it should be configured to allow
+ maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+ reasonable time to review.
+ * Trivial changes[^2] do not have to wait for one day and may be merged with
+ a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* An urgent fix can take exception as long as it has been actively communicated
+ among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+ status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+ changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conforms to the specification, but the interface and structure is
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Tests
+
+Each functionality should be covered by tests.
+
+Performance-critical functionality should also be covered by benchmarks.
+
+- Pull requests adding a performance-critical functionality
+should have `go test -bench` output in their description.
+- Pull requests changing a performance-critical functionality
+should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
+output in their description.
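+
+As a concrete illustration, output like that can be produced as follows; the benchmark name and package path here are placeholders:
+
+```sh
+# Run the benchmark on the base branch, then again with your change applied.
+go test -run='^$' -bench=BenchmarkYourChange -count=10 ./your/package/ > old.txt
+# ...apply your change, rebuild, and re-run:
+go test -run='^$' -bench=BenchmarkYourChange -count=10 ./your/package/ > new.txt
+benchstat old.txt new.txt
+```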
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
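+
+A minimal testable example has the following shape; the function name and output here are placeholders:
+
+```go
+// ExampleThing lives in an example_test.go file. The go test tool runs
+// it and compares stdout against the Output comment.
+func ExampleThing() {
+	fmt.Println("thing")
+	// Output: thing
+}
+```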
+
+You can install and run a "local Go Doc site" in the following way:
+
+ ```sh
+ go install golang.org/x/pkgsite/cmd/pkgsite@latest
+ pkgsite
+ ```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+### README files
+
+Each (non-internal, non-test, non-documentation) package must contain a
+`README.md` file containing at least a title, and a `pkg.go.dev` badge.
+
+The README should not be a repetition of Go doc comments.
+
+You can verify the presence of all README files with the `make verify-readmes`
+command.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you plan to
+submit. This check will need to pass for your changes to be able to be
+merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed documentation needs to be included as
+to the reasons why.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, Go's
+strong type system restricts the possible function designs. There are a few
+ways to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config`s in the package. This type must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+ // options ...
+}
+```
+
+In general the `config` type will not need to be used outside of the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config`s are not shared across package
+boundaries, meaning a `config` from one package should not be directly used
+by another. The one exception is the API packages. The configs from the base
+API, e.g. `go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be
+consumed by the SDK, and therefore it is expected that these are exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed by methods.
+
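+A sketch of an exported config following this rule (the type and field
+names are hypothetical):
+
+```go
+import "time"
+
+// Config is exported so the SDK can consume it, but its fields are not.
+type Config struct {
+	timeout time.Duration
+}
+
+// Timeout returns the configured timeout.
+func (c Config) Timeout() time.Duration { return c.timeout }
+```
+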
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function wraps any default setting and loops over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+ // Set default values for config.
+ config := config{/* […] */}
+ for _, option := range options {
+ config = option.apply(config)
+ }
+ // Perform any validation here.
+ return config
+}
+```
+
+If validation of the `config` options is also performed, this function can
+return an error as well, which is expected to be handled by the
+instantiation function or propagated to the user.
+
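+A sketch of that variant, assuming the `config` and `Option` types above
+(the `validate` helper is hypothetical):
+
+```go
+// newConfig returns a configured config, or an error if any option
+// produced an invalid configuration.
+func newConfig(options ...Option) (config, error) {
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	if err := validate(config); err != nil { // validate is hypothetical.
+		return config, err
+	}
+	return config, nil
+}
+```
+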
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+ apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config
+from escaping to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or in the
+special case of a boolean option, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+ c.Bool = bool(o)
+ return c
+}
+
+// WithOption sets a T to have an option included.
+func WithOption() Option {
+ return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+ c.Bool = bool(o)
+ return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+ return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+ MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+ c.MyType = o.MyType
+ return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+ return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+ return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+ return optionFunc(func(c config) config {
+ c.MyType = t
+ return c
+ })
+}
+```
+
+#### Instantiation
+
+This configuration pattern is used to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
+
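+A sketch of such a function, assuming the `config` and `Option` machinery
+above (the fields of `T` are hypothetical):
+
+```go
+// NewT returns a T configured with the provided options.
+func NewT(name string, options ...Option) T {
+	cfg := newConfig(options...)
+	return T{name: name, cfg: cfg}
+}
+```
+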
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct` that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+ Weight float64
+ Color string
+ MaxAltitude float64
+}
+
+// DogOption applies Dog specific options.
+type DogOption interface {
+ applyDog(config) config
+}
+
+// BirdOption applies Bird specific options.
+type BirdOption interface {
+ applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+ BirdOption
+ DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+ c.Weight = float64(o)
+ return c
+}
+
+func (o weightOption) applyBird(c config) config {
+ c.Weight = float64(o)
+ return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+ c.Color = string(o)
+ return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+ c.MaxAltitude = float64(o)
+ return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is self-documenting code, achieved by naming method parameters.
+Therefore, where appropriate, methods of every exported interface type
+should have their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
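+For illustration, the documentation of such an extensible interface might
+read as follows (the interface itself is hypothetical):
+
+```go
+// Processor handles telemetry.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Processor interface {
+	Process()
+}
+```
+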
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that
+there was no way to introduce a v2 and have it work seamlessly with the v1 of
+the API. Problems arose with libraries that upgraded to v2 when an application
+did not; those libraries would not produce any telemetry.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface. For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+ Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+ Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+ /* ... */
+ if c, ok := e.(Closer); ok {
+ c.Close()
+ }
+ /* ... */
+}
+```
+
+Alternatively, a new type that is the super-set of an `Exporter` can be created.
+
+```go
+type ClosingExporter struct {
+	Exporter
+}
+
+// Close releases any resources held by the ClosingExporter.
+func (e ClosingExporter) Close() { /* ... */ }
+```
+
+This new type can be used similarly to the simple interface above, in that a
+passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
+and the `Close` method called.
+
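+For example, a sketch of that assertion, assuming the types above:
+
+```go
+func shutdown(e Exporter) {
+	if ce, ok := e.(ClosingExporter); ok {
+		ce.Close()
+	}
+}
+```
+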
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set interface
+and will duplicate the pattern. For this reason, the simple targeted interface
+that defines the specific functionality should be preferred.
+
+See also:
+[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions. The top-level tests with this term will be run
+many times in the `test-concurrent-safe` CI job to increase the chance of
+catching concurrency issues. This does not apply to subtests when this term
+is not in their root name.
+
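+A sketch of such a test, using a hypothetical counter type:
+
+```go
+package thing
+
+import (
+	"sync"
+	"sync/atomic"
+	"testing"
+)
+
+// counter is a hypothetical type whose concurrency safety is under test.
+type counter struct{ n atomic.Int64 }
+
+func (c *counter) inc() { c.n.Add(1) }
+
+// TestCounterConcurrentSafe has ConcurrentSafe in its root name, so the
+// test-concurrent-safe CI job repeats it many times with -race.
+func TestCounterConcurrentSafe(t *testing.T) {
+	var c counter
+	var wg sync.WaitGroup
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			c.inc()
+		}()
+	}
+	wg.Wait() // Wait for every goroutine; the test leaks none.
+	if got := c.n.Load(); got != 100 {
+		t.Errorf("counter = %d, want 100", got)
+	}
+}
+```
+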
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. Doing so creates a coupling
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, the upgrade will fail[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+ - This package manages global state for all of opentelemetry-go. It needs to
+ be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+ - This package provides values in a `context.Context` that need to be
+ recognized by `go.opentelemetry.io/otel/baggage` and
+ `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore cancellation of the contexts
+that are passed when recording a value (e.g. starting a span, recording a
+measurement, emitting a log). Recording methods should not return an error
+describing the cancellation state of the context when they complete, nor
+should they abort any work.
+
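+A sketch of a conforming recording method (the `logger` and `Record` types
+are hypothetical):
+
+```go
+// Emit records r. It deliberately never consults ctx.Done() or
+// ctx.Err(): cancellation neither aborts the recording nor surfaces
+// as an error to the caller.
+func (l *logger) Emit(ctx context.Context, r Record) {
+	l.records = append(l.records, r)
+}
+```
+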
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user provided context
+should be canceled.
+
+## Approvers and Maintainers
+
+### Triagers
+
+- [Alex Kats](https://github.com/akats7), Capital One
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
+
+### Approvers
+
+### Maintainers
+
+- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
+- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70))
+- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2))
+- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
+- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
+
+### Emeritus
+
+- [Aaron Clawson](https://github.com/MadVikingGod)
+- [Anthony Mirabella](https://github.com/Aneurysm9)
+- [Chester Cheung](https://github.com/hanyuancheung)
+- [Evan Torrie](https://github.com/evantorrie)
+- [Gustavo Silva Paiva](https://github.com/paivagustavo)
+- [Josh MacDonald](https://github.com/jmacd)
+- [Liz Fong-Jones](https://github.com/lizthegrey)
+
+### Become an Approver or a Maintainer
+
+See the [community membership document in OpenTelemetry community
+repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
+
+[Approver]: #approvers
+[Maintainer]: #maintainers
+[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
+[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/client_model/LICENSE
rename to vendor/go.opentelemetry.io/otel/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
new file mode 100644
index 000000000..4fa423ca0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -0,0 +1,329 @@
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+TOOLS_MOD_DIR := ./internal/tools
+
+ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
+ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+
+GO = go
+TIMEOUT = 60
+
+# User to run as in docker images.
+DOCKER_USER=$(shell id -u):$(shell id -g)
+DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile
+
+.DEFAULT_GOAL := precommit
+
+.PHONY: precommit ci
+precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
+ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage
+
+# Tools
+
+TOOLS = $(CURDIR)/.tools
+
+$(TOOLS):
+ @mkdir -p $@
+$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
+ cd $(TOOLS_MOD_DIR) && \
+ $(GO) build -o $@ $(PACKAGE)
+
+MULTIMOD = $(TOOLS)/multimod
+$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
+
+SEMCONVGEN = $(TOOLS)/semconvgen
+$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
+
+CROSSLINK = $(TOOLS)/crosslink
+$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
+
+SEMCONVKIT = $(TOOLS)/semconvkit
+$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
+
+VERIFYREADMES = $(TOOLS)/verifyreadmes
+$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes
+
+GOLANGCI_LINT = $(TOOLS)/golangci-lint
+$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint
+
+MISSPELL = $(TOOLS)/misspell
+$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
+
+GOCOVMERGE = $(TOOLS)/gocovmerge
+$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
+
+STRINGER = $(TOOLS)/stringer
+$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
+
+PORTO = $(TOOLS)/porto
+$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
+
+GOTMPL = $(TOOLS)/gotmpl
+$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
+
+GORELEASE = $(TOOLS)/gorelease
+$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
+
+GOVULNCHECK = $(TOOLS)/govulncheck
+$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
+
+.PHONY: tools
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+
+# Virtualized python tools via docker
+
+# The directory where the virtual environment is created.
+VENVDIR := venv
+
+# The directory where the python tools are installed.
+PYTOOLS := $(VENVDIR)/bin
+
+# The pip executable in the virtual environment.
+PIP := $(PYTOOLS)/pip
+
+# The directory in the docker image where the current directory is mounted.
+WORKDIR := /workdir
+
+# The python image to use for the virtual environment.
+PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
+
+# Run the python image with the current directory mounted.
+DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
+
+# Create a virtual environment for Python tools.
+$(PYTOOLS):
+# The `--upgrade` flag is needed to ensure that the virtual environment is
+# created with the latest pip version.
+ @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip"
+
+# Install python packages into the virtual environment.
+$(PYTOOLS)/%: $(PYTOOLS)
+ @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt
+
+CODESPELL = $(PYTOOLS)/codespell
+$(CODESPELL): PACKAGE=codespell
+
+# Generate
+
+.PHONY: generate
+generate: go-generate vanity-import-fix
+
+.PHONY: go-generate
+go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
+go-generate/%: DIR=$*
+go-generate/%: $(STRINGER) $(GOTMPL)
+ @echo "$(GO) generate $(DIR)/..." \
+ && cd $(DIR) \
+ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
+
+.PHONY: vanity-import-fix
+vanity-import-fix: $(PORTO)
+ @$(PORTO) --include-internal -w .
+
+# Generate go.work file for local development.
+.PHONY: go-work
+go-work: $(CROSSLINK)
+ $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7
+
+# Build
+
+.PHONY: build
+
+build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
+build/%: DIR=$*
+build/%:
+ @echo "$(GO) build $(DIR)/..." \
+ && cd $(DIR) \
+ && $(GO) build ./...
+
+build-tests/%: DIR=$*
+build-tests/%:
+ @echo "$(GO) build tests $(DIR)/..." \
+ && cd $(DIR) \
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
+.PHONY: $(TEST_TARGETS) test
+test-default test-race: ARGS=-race
+test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
+test-short: ARGS=-short
+test-verbose: ARGS=-v -race
+test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
+test-concurrent-safe: TIMEOUT=120
+$(TEST_TARGETS): test
+test: $(OTEL_GO_MOD_DIRS:%=test/%)
+test/%: DIR=$*
+test/%:
+ @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
+ && cd $(DIR) \
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
+
+COVERAGE_MODE = atomic
+COVERAGE_PROFILE = coverage.out
+.PHONY: test-coverage
+test-coverage: $(GOCOVMERGE)
+ @set -e; \
+ printf "" > coverage.txt; \
+ for dir in $(ALL_COVERAGE_MOD_DIRS); do \
+ echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
+ (cd "$${dir}" && \
+ $(GO) list ./... \
+ | grep -v third_party \
+ | grep -v 'semconv/v.*' \
+ | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
+ $(GO) tool cover -html=coverage.out -o coverage.html); \
+ done; \
+ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
+
+.PHONY: benchmark
+benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
+benchmark/%:
+ @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
+ && cd $* \
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
+
+.PHONY: golangci-lint golangci-lint-fix
+golangci-lint-fix: ARGS=--fix
+golangci-lint-fix: golangci-lint
+golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
+golangci-lint/%: DIR=$*
+golangci-lint/%: $(GOLANGCI_LINT)
+ @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
+ && cd $(DIR) \
+ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
+
+.PHONY: crosslink
+crosslink: $(CROSSLINK)
+ @echo "Updating intra-repository dependencies in all go modules" \
+ && $(CROSSLINK) --root=$(shell pwd) --prune
+
+.PHONY: go-mod-tidy
+go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
+go-mod-tidy/%: DIR=$*
+go-mod-tidy/%: crosslink
+ @echo "$(GO) mod tidy in $(DIR)" \
+ && cd $(DIR) \
+ && $(GO) mod tidy -compat=1.21
+
+.PHONY: lint
+lint: misspell go-mod-tidy golangci-lint govulncheck
+
+.PHONY: vanity-import-check
+vanity-import-check: $(PORTO)
+ @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
+
+.PHONY: misspell
+misspell: $(MISSPELL)
+ @$(MISSPELL) -w $(ALL_DOCS)
+
+.PHONY: govulncheck
+govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
+govulncheck/%: DIR=$*
+govulncheck/%: $(GOVULNCHECK)
+ @echo "govulncheck ./... in $(DIR)" \
+ && cd $(DIR) \
+ && $(GOVULNCHECK) ./...
+
+.PHONY: codespell
+codespell: $(CODESPELL)
+ @$(DOCKERPY) $(CODESPELL)
+
+.PHONY: toolchain-check
+toolchain-check:
+ @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \
+ awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \
+ done); \
+ if [ -n "$${toolchainRes}" ]; then \
+ echo "toolchain checking failed:"; echo "$${toolchainRes}"; \
+ exit 1; \
+ fi
+
+.PHONY: license-check
+license-check:
+ @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
+ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
+ done); \
+ if [ -n "$${licRes}" ]; then \
+ echo "license header checking failed:"; echo "$${licRes}"; \
+ exit 1; \
+ fi
+
+.PHONY: check-clean-work-tree
+check-clean-work-tree:
+ @if ! git diff --quiet; then \
+ echo; \
+ echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
+ echo; \
+ git status; \
+ exit 1; \
+ fi
+
+# The weaver docker image to use for semconv-generate.
+WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
+
+SEMCONVPKG ?= "semconv/"
+.PHONY: semconv-generate
+semconv-generate: $(SEMCONVKIT)
+ [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
+ # Ensure the target directory for source code is available.
+ mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG}
+ # Note: We mount a home directory for downloading/storing the semconv repository.
+ # Weaver will automatically clean the cache when finished, but the directories will remain.
+ mkdir -p ~/.weaver
+ docker run --rm \
+ -u $(DOCKER_USER) \
+ --env HOME=/tmp/weaver \
+ --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
+ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
+ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
+ $(WEAVER_IMAGE) registry generate \
+ --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \
+ --templates=/home/weaver/templates \
+ --param tag=$(TAG) \
+ go \
+ /home/weaver/target
+ $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)"
+
+.PHONY: gorelease
+gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
+gorelease/%: DIR=$*
+gorelease/%:| $(GORELEASE)
+ @echo "gorelease in $(DIR):" \
+ && cd $(DIR) \
+ && $(GORELEASE) \
+ || echo ""
+
+.PHONY: verify-mods
+verify-mods: $(MULTIMOD)
+ $(MULTIMOD) verify
+
+.PHONY: prerelease
+prerelease: verify-mods
+ @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+ $(MULTIMOD) prerelease -m ${MODSET}
+
+COMMIT ?= "HEAD"
+.PHONY: add-tags
+add-tags: verify-mods
+ @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+ $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+
+MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
+.PHONY: lint-markdown
+lint-markdown:
+ docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
+
+.PHONY: verify-readmes
+verify-readmes: $(VERIFYREADMES)
+ $(VERIFYREADMES)
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
new file mode 100644
index 000000000..5fa1b75c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -0,0 +1,115 @@
+# OpenTelemetry-Go
+
+[](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
+[](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
+[](https://pkg.go.dev/go.opentelemetry.io/otel)
+[](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
+[](https://www.bestpractices.dev/projects/9996)
+[](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go)
+[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license)
+[](https://cloud-native.slack.com/archives/C01NPAXACKT)
+
+OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal | Status |
+|---------|--------------------|
+| Traces | Stable |
+| Metrics | Stable |
+| Logs | Beta[^1] |
+
+Progress and status specific to this repository are tracked in our
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](VERSIONING.md).
+
+[^1]: https://github.com/orgs/open-telemetry/projects/43
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the current supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+ supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+ testing for the oldest (now archived upstream) version of Go. This, and
+ future, releases of opentelemetry-go may include features only supported by
+ the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu | 1.24 | amd64 |
+| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.24 | 386 |
+| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.24 | arm64 |
+| Ubuntu | 1.23 | arm64 |
+| macOS 13 | 1.24 | amd64 |
+| macOS 13 | 1.23 | amd64 |
+| macOS | 1.24 | arm64 |
+| macOS | 1.23 | arm64 |
+| Windows | 1.24 | amd64 |
+| Windows | 1.23 | amd64 |
+| Windows | 1.24 | 386 |
+| Windows | 1.23 | 386 |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides or want
+to build your own instrumentation for your application directly, you will need
+to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
+are a good way to see some practical uses of this process.
+
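+As a minimal sketch, manual instrumentation with the globally registered
+tracer provider looks like this:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+)
+
+func main() {
+	// Obtain a tracer from the global provider and record a span
+	// around some unit of work.
+	tracer := otel.Tracer("example.com/app")
+	_, span := tracer.Start(context.Background(), "operation")
+	defer span.End()
+}
+```
+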
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | | ✓ |
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 000000000..1ddcdef03
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,173 @@
+# Release Process
+
+## Create a `Version Release` issue
+
+Create a `Version Release` issue to track the release process.
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Set the `TAG` environment variable to the semantic convention tag you want to generate.
+2. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.30.0" # Change to the release version you are generating.
+make semconv-generate # Uses the exported TAG.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release, which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+ `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+ ```
+ make prerelease MODSET=<module set>
+ ```
+
+2. Verify the changes.
+
+ ```
+ git diff main..prerelease_<module set>_<new tag>
+ ```
+
+ This should have changed the version for all modules to be `<new tag>`.
+ If these changes look correct, merge them into your pre-release branch:
+
+ ```
+ git merge prerelease_<module set>_<new tag>
+ ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+ - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+ To verify this, you can look directly at the commits since the `<last tag>`.
+
+ ```
+ git --no-pager log --pretty=oneline "<last tag>..HEAD"
+ ```
+
+ - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+ - Make sure the new section is under the comment for the released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
+ - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+ Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies that are tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+ using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+ ```
+ make add-tags MODSET=<module set> COMMIT=<commit hash>
+ ```
+
+ It should only be necessary to provide an explicit `COMMIT` value if the
+ current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+ Make sure you push all sub-modules as well.
+
+ ```
+ git push upstream <new tag>
+ git push upstream <submodules-as-needed>
+ ...
+ ```
+
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+### Sign the Release Artifact
+
+To ensure we comply with CNCF best practices, we need to sign the release artifact.
+The tarball attached to the GitHub release needs to be signed with your GPG key.
+
+Follow [these steps] to sign the release artifact and upload it to GitHub.
+You can use [this script] to verify the contents of the tarball before signing it.
+
+Be sure to use the correct GPG key when signing the release artifact.
+
+```terminal
+gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<tag>.tar.gz
+```
+
+You can verify the signature with:
+
+```terminal
+gpg --verify opentelemetry-go-<tag>.tar.gz.asc opentelemetry-go-<tag>.tar.gz
+```
+
+[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
+[this script]: https://github.com/MrAlias/attest-sh
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Close the milestone
+
+Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone.
+This helps track what changes were included in each release.
+
+- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr)
+- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged).
+
+Once all related issues and PRs have been added to the milestone, close the milestone.
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
+
+### Close the `Version Release` issue
+
+Once the todo list in the `Version Release` issue is complete, close the issue.
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 000000000..b8cb605c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+ modules](https://github.com/golang/go/wiki/Modules).
+ * [Semantic import
+ versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+ will be used.
+ * Versions will comply with [semver
+ 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+ * New methods may be added to exported API interfaces. All exported
+ interfaces that fall within this exception will include the following
+ paragraph in their public documentation.
+
+ > Warning: methods may be added to this interface in minor releases.
+
+ * If a module is version `v2` or higher, the major version of the module
+ must be included as a `/vN` at the end of the module paths used in
+ `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
+ go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
+ (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
+ paths used in `go get` commands (e.g., `go get
+ go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a
+ `@v2.0.1` in that example. One way to think about it is that the module
+ name now includes the `/v2`, so include `/v2` whenever you are using the
+ module name.
+ * If a module is version `v0` or `v1`, do not include the major version in
+ either the module path or the import path.
+ * Modules will be used to encapsulate signals and components.
+ * Experimental modules still under active development will be versioned at
+ `v0` to imply the stability guarantee defined by
+ [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+ > Major version zero (0.y.z) is for initial development. Anything MAY
+ > change at any time. The public API SHOULD NOT be considered stable.
+
+ * Mature modules for which we guarantee a stable public API will be versioned
+ with a major version greater than `v0`.
+ * The decision to make a module stable will be made on a case-by-case
+ basis by the maintainers of this project.
+ * Experimental modules will start their versioning at `v0.0.0` and will
+ increment their minor version when backwards incompatible changes are
+ released and increment their patch version when backwards compatible
+ changes are released.
+ * All stable modules that use the same major version number will use the
+ same entire version number.
+ * Stable modules may be released with an incremented minor or patch
+ version even though that module has not been changed, but rather so
+ that it will remain at the same version as other stable modules that
+ did undergo change.
+ * When an experimental module becomes stable a new stable module version
+ will be released and will include this now stable module. The new
+ stable module version will be an increment of the minor version number
+ and will be applied to all existing stable modules as well as the newly
+ stable module being released.
+* Versioning of the associated [contrib
+ repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+ this project will be idiomatic of a Go project using [Go
+ modules](https://github.com/golang/go/wiki/Modules).
+ * [Semantic import
+ versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+ will be used.
+ * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+ * If a module is version `v2` or higher, the
+ major version of the module must be included as a `/vN` at the end of the
+ module paths used in `go.mod` files (e.g., `module
+ go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+ go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+ package import path (e.g., `import
+ "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+ the paths used in `go get` commands (e.g., `go get
+ go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+ is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+ it is that the module name now includes the `/v2`, so include `/v2`
+ whenever you are using the module name).
+ * If a module is version `v0` or `v1`, do not include the major version
+ in either the module path or the import path.
+ * In addition to public APIs, telemetry produced by stable instrumentation
+ will remain stable and backwards compatible. This is to avoid breaking
+ alerts and dashboards.
+ * Modules will be used to encapsulate instrumentation, detectors, exporters,
+ propagators, and any other independent sets of related components.
+ * Experimental modules still under active development will be versioned at
+ `v0` to imply the stability guarantee defined by
+ [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+ > Major version zero (0.y.z) is for initial development. Anything MAY
+ > change at any time. The public API SHOULD NOT be considered stable.
+
+ * Mature modules for which we guarantee a stable public API and telemetry will
+ be versioned with a major version greater than `v0`.
+ * Experimental modules will start their versioning at `v0.0.0` and will
+ increment their minor version when backwards incompatible changes are
+ released and increment their patch version when backwards compatible
+ changes are released.
+ * Stable contrib modules cannot depend on experimental modules from this
+ project.
+ * All stable contrib modules of the same major version with this project
+ will use the same entire version as this project.
+ * Stable modules may be released with an incremented minor or patch
+ version even though that module's code has not been changed. Instead
+ the only change that will have been included is to have updated that
+ module's dependency on this project's stable APIs.
+ * When an experimental module in contrib becomes stable a new stable
+ module version will be released and will include this now stable
+ module. The new stable module version will be an increment of the minor
+ version number and will be applied to all existing stable contrib
+ modules, this project's modules, and the newly stable module being
+ released.
+ * Contrib modules will be kept up to date with this project's releases.
+ * Due to the dependency contrib modules will implicitly have on this
+ project's modules the release of stable contrib modules to match the
+ released version number will be staggered after this project's release.
+ There is no explicit time guarantee for how long after this projects
+ release the contrib release will be. Effort should be made to keep them
+ as close in time as possible.
+ * No additional stable release in this project can be made until the
+ contrib repository has a matching stable release.
+ * No release can be made in the contrib repository after this project's
+ stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to a point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they
+should be considered for a stable release. The `otel/metric` and
+`otel/sdk/metric` are still under active development and the `otel` module
+depends on both `otel/trace` and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of the new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md
new file mode 100644
index 000000000..5b3da8f14
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/README.md
@@ -0,0 +1,3 @@
+# Attribute
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 000000000..eef51ebc2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,5 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 000000000..318e42fca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "bytes"
+ "sync"
+ "sync/atomic"
+)
+
+type (
+ // Encoder is a mechanism for serializing an attribute set into a specific
+ // string representation that supports caching, to avoid repeated
+ // serialization. An example could be an exporter encoding the attribute
+ // set into a wire representation.
+ Encoder interface {
+ // Encode returns the serialized encoding of the attribute set using
+	// its Iterator. This result may be cached by an attribute.Set.
+ Encode(iterator Iterator) string
+
+ // ID returns a value that is unique for each class of attribute
+ // encoder. Attribute encoders allocate these using `NewEncoderID`.
+ ID() EncoderID
+ }
+
+ // EncoderID is used to identify distinct Encoder
+ // implementations, for caching encoded results.
+ EncoderID struct {
+ value uint64
+ }
+
+ // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+ // allocations used in encoding attributes. This implementation encodes a
+ // comma-separated list of key=value, with '/'-escaping of '=', ',', and
+ // '\'.
+ defaultAttrEncoder struct {
+ // pool is a pool of attribute set builders. The buffers in this pool
+ // grow to a size that most attribute encodings will not allocate new
+ // memory.
+ pool sync.Pool // *bytes.Buffer
+ }
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+ _ Encoder = &defaultAttrEncoder{}
+
+ // encoderIDCounter is for generating IDs for other attribute encoders.
+ encoderIDCounter uint64
+
+ defaultEncoderOnce sync.Once
+ defaultEncoderID = NewEncoderID()
+ defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once per each type of attribute encoder. Preferably in init() or in var
+// definition.
+func NewEncoderID() EncoderID {
+ return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns an attribute encoder that encodes attributes in such
+// a way that each escaped attribute's key is followed by an equal sign and
+// then by an escaped attribute's value. All key-value pairs are separated by
+// a comma.
+//
+// Escaping is done by prepending a backslash before either a backslash, equal
+// sign or a comma.
+func DefaultEncoder() Encoder {
+ defaultEncoderOnce.Do(func() {
+ defaultEncoderInstance = &defaultAttrEncoder{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+ },
+ }
+ })
+ return defaultEncoderInstance
+}
+
+// Encode is a part of an implementation of the AttributeEncoder interface.
+func (d *defaultAttrEncoder) Encode(iter Iterator) string {
+ buf := d.pool.Get().(*bytes.Buffer)
+ defer d.pool.Put(buf)
+ buf.Reset()
+
+ for iter.Next() {
+ i, keyValue := iter.IndexedAttribute()
+ if i > 0 {
+ _, _ = buf.WriteRune(',')
+ }
+ copyAndEscape(buf, string(keyValue.Key))
+
+ _, _ = buf.WriteRune('=')
+
+ if keyValue.Value.Type() == STRING {
+ copyAndEscape(buf, keyValue.Value.AsString())
+ } else {
+ _, _ = buf.WriteString(keyValue.Value.Emit())
+ }
+ }
+ return buf.String()
+}
+
+// ID is a part of an implementation of the AttributeEncoder interface.
+func (*defaultAttrEncoder) ID() EncoderID {
+ return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+ for _, ch := range val {
+ switch ch {
+ case '=', ',', escapeChar:
+ _, _ = buf.WriteRune(escapeChar)
+ }
+ _, _ = buf.WriteRune(ch)
+ }
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`. Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+ return id.value != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
new file mode 100644
index 000000000..3eeaa5d44
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Filter supports removing certain attributes from attribute sets. When
+// the filter returns true, the attribute will be kept in the filtered
+// attribute set. When the filter returns false, the attribute is excluded
+// from the filtered attribute set, and the attribute instead appears in
+// the removed list of excluded attributes.
+type Filter func(KeyValue) bool
+
+// NewAllowKeysFilter returns a Filter that only allows attributes with one of
+// the provided keys.
+//
+// If keys is empty a deny-all filter is returned.
+func NewAllowKeysFilter(keys ...Key) Filter {
+ if len(keys) <= 0 {
+ return func(kv KeyValue) bool { return false }
+ }
+
+ allowed := make(map[Key]struct{}, len(keys))
+ for _, k := range keys {
+ allowed[k] = struct{}{}
+ }
+ return func(kv KeyValue) bool {
+ _, ok := allowed[kv.Key]
+ return ok
+ }
+}
+
+// NewDenyKeysFilter returns a Filter that only allows attributes
+// that do not have one of the provided keys.
+//
+// If keys is empty an allow-all filter is returned.
+func NewDenyKeysFilter(keys ...Key) Filter {
+ if len(keys) <= 0 {
+ return func(kv KeyValue) bool { return true }
+ }
+
+ forbid := make(map[Key]struct{}, len(keys))
+ for _, k := range keys {
+ forbid[k] = struct{}{}
+ }
+ return func(kv KeyValue) bool {
+ _, ok := forbid[kv.Key]
+ return !ok
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
new file mode 100644
index 000000000..b76d2bbfd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package attribute provides several helper functions for some commonly used
+logic for processing attributes.
+*/
+package attribute // import "go.opentelemetry.io/otel/attribute/internal"
+
+import (
+ "reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+ var zero bool
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+ var zero int64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+ var zero float64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+ var zero string
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]bool, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]int64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]float64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
+func AsStringSlice(v interface{}) []string {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]string, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
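
These conversions exist because Go arrays, unlike slices, are comparable: storing slice data as a fixed-size array keeps the enclosing Value (and any Set built from it) usable with == and as a map key. A sketch via the public API:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.BoolSliceValue([]bool{true, false})
	w := attribute.BoolSliceValue([]bool{true, false})
	fmt.Println(v.AsBoolSlice()) // [true false]
	// The data is held internally as a comparable [2]bool array, so
	// Values with equal contents compare equal.
	fmt.Println(v == w) // true
}
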
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
new file mode 100644
index 000000000..f2ba89ce4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Iterator allows iterating over the set of attributes in order, sorted by
+// key.
+type Iterator struct {
+ storage *Set
+ idx int
+}
+
+// MergeIterator supports iterating over two sets of attributes while
+// eliminating duplicate values from the combined set. The first iterator
+// value takes precedence.
+type MergeIterator struct {
+ one oneIterator
+ two oneIterator
+ current KeyValue
+}
+
+type oneIterator struct {
+ iter Iterator
+ done bool
+ attr KeyValue
+}
+
+// Next moves the iterator to the next position. Returns false if there are no
+// more attributes.
+func (i *Iterator) Next() bool {
+ i.idx++
+ return i.idx < i.Len()
+}
+
+// Label returns the current KeyValue. It must be called only after Next
+// returns true.
+//
+// Deprecated: Use Attribute instead.
+func (i *Iterator) Label() KeyValue {
+ return i.Attribute()
+}
+
+// Attribute returns the current KeyValue of the Iterator. It must be called
+// only after Next returns true.
+func (i *Iterator) Attribute() KeyValue {
+ kv, _ := i.storage.Get(i.idx)
+ return kv
+}
+
+// IndexedLabel returns the current index and attribute. It must be called
+// only after Next returns true.
+//
+// Deprecated: Use IndexedAttribute instead.
+func (i *Iterator) IndexedLabel() (int, KeyValue) {
+ return i.idx, i.Attribute()
+}
+
+// IndexedAttribute returns the current index and attribute. It must be
+// called only after Next returns true.
+func (i *Iterator) IndexedAttribute() (int, KeyValue) {
+ return i.idx, i.Attribute()
+}
+
+// Len returns the number of attributes in the iterated set.
+func (i *Iterator) Len() int {
+ return i.storage.Len()
+}
+
+// ToSlice is a convenience function that creates a slice of attributes from
+// the passed iterator. The iterator is set up to start from the beginning
+// before creating the slice.
+func (i *Iterator) ToSlice() []KeyValue {
+ l := i.Len()
+ if l == 0 {
+ return nil
+ }
+ i.idx = -1
+ slice := make([]KeyValue, 0, l)
+ for i.Next() {
+ slice = append(slice, i.Attribute())
+ }
+ return slice
+}
+
+// NewMergeIterator returns a MergeIterator for merging two attribute sets.
+// Duplicates are resolved by taking the value from the first set.
+func NewMergeIterator(s1, s2 *Set) MergeIterator {
+ mi := MergeIterator{
+ one: makeOne(s1.Iter()),
+ two: makeOne(s2.Iter()),
+ }
+ return mi
+}
+
+func makeOne(iter Iterator) oneIterator {
+ oi := oneIterator{
+ iter: iter,
+ }
+ oi.advance()
+ return oi
+}
+
+func (oi *oneIterator) advance() {
+ if oi.done = !oi.iter.Next(); !oi.done {
+ oi.attr = oi.iter.Attribute()
+ }
+}
+
+// Next returns true if there is another attribute available.
+func (m *MergeIterator) Next() bool {
+ if m.one.done && m.two.done {
+ return false
+ }
+ if m.one.done {
+ m.current = m.two.attr
+ m.two.advance()
+ return true
+ }
+ if m.two.done {
+ m.current = m.one.attr
+ m.one.advance()
+ return true
+ }
+ if m.one.attr.Key == m.two.attr.Key {
+ m.current = m.one.attr // first iterator attribute value wins
+ m.one.advance()
+ m.two.advance()
+ return true
+ }
+ if m.one.attr.Key < m.two.attr.Key {
+ m.current = m.one.attr
+ m.one.advance()
+ return true
+ }
+ m.current = m.two.attr
+ m.two.advance()
+ return true
+}
+
+// Label returns the current value after Next() returns true.
+//
+// Deprecated: Use Attribute instead.
+func (m *MergeIterator) Label() KeyValue {
+ return m.current
+}
+
+// Attribute returns the current value after Next() returns true.
+func (m *MergeIterator) Attribute() KeyValue {
+ return m.current
+}
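
A sketch of MergeIterator semantics: attributes are visited in key order, and the first set wins on duplicate keys:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	s1 := attribute.NewSet(attribute.String("a", "one"), attribute.String("b", "first"))
	s2 := attribute.NewSet(attribute.String("b", "second"), attribute.String("c", "three"))
	it := attribute.NewMergeIterator(&s1, &s2)
	for it.Next() {
		kv := it.Attribute()
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString())
	}
	// a=one
	// b=first (from s1, the first iterator)
	// c=three
}
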
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
new file mode 100644
index 000000000..d9a22c650
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -0,0 +1,123 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Key represents the key part in key-value pairs. It's a string. The
+// allowed character set in the key depends on the use of the key.
+type Key string
+
+// Bool creates a KeyValue instance with a BOOL Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Bool(name, value).
+func (k Key) Bool(v bool) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: BoolValue(v),
+ }
+}
+
+// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- BoolSlice(name, value).
+func (k Key) BoolSlice(v []bool) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: BoolSliceValue(v),
+ }
+}
+
+// Int creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int(name, value).
+func (k Key) Int(v int) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: IntValue(v),
+ }
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: IntSliceValue(v),
+ }
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Int64Value(v),
+ }
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
+func (k Key) Int64Slice(v []int64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Int64SliceValue(v),
+ }
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Float64Value(v),
+ }
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Float64SliceValue(v),
+ }
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: StringValue(v),
+ }
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: StringSliceValue(v),
+ }
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+ return len(k) != 0
+}
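
These methods support the common pattern of declaring a Key once and reusing it at every call site; a brief sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

const statusKey = attribute.Key("http.status_code")

func main() {
	kv := statusKey.Int(200) // same as attribute.Int("http.status_code", 200)
	fmt.Println(kv.Key, kv.Value.Emit()) // http.status_code 200
}
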
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 000000000..3028f9a40
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+ Key Key
+ Value Value
+}
+
+// Valid returns true if kv is a valid OpenTelemetry attribute.
+func (kv KeyValue) Valid() bool {
+ return kv.Key.Defined() && kv.Value.Type() != INVALID
+}
+
+// Bool creates a KeyValue with a BOOL Value type.
+func Bool(k string, v bool) KeyValue {
+ return Key(k).Bool(v)
+}
+
+// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
+func BoolSlice(k string, v []bool) KeyValue {
+ return Key(k).BoolSlice(v)
+}
+
+// Int creates a KeyValue with an INT64 Value type.
+func Int(k string, v int) KeyValue {
+ return Key(k).Int(v)
+}
+
+// IntSlice creates a KeyValue with an INT64SLICE Value type.
+func IntSlice(k string, v []int) KeyValue {
+ return Key(k).IntSlice(v)
+}
+
+// Int64 creates a KeyValue with an INT64 Value type.
+func Int64(k string, v int64) KeyValue {
+ return Key(k).Int64(v)
+}
+
+// Int64Slice creates a KeyValue with an INT64SLICE Value type.
+func Int64Slice(k string, v []int64) KeyValue {
+ return Key(k).Int64Slice(v)
+}
+
+// Float64 creates a KeyValue with a FLOAT64 Value type.
+func Float64(k string, v float64) KeyValue {
+ return Key(k).Float64(v)
+}
+
+// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
+func Float64Slice(k string, v []float64) KeyValue {
+ return Key(k).Float64Slice(v)
+}
+
+// String creates a KeyValue with a STRING Value type.
+func String(k, v string) KeyValue {
+ return Key(k).String(v)
+}
+
+// StringSlice creates a KeyValue with a STRINGSLICE Value type.
+func StringSlice(k string, v []string) KeyValue {
+ return Key(k).StringSlice(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
+func Stringer(k string, v fmt.Stringer) KeyValue {
+ return Key(k).String(v.String())
+}
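
The package-level constructors wrap the Key methods for one-shot use; Stringer accepts any fmt.Stringer, such as time.Duration. A sketch:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kvs := []attribute.KeyValue{
		attribute.String("peer.service", "billing"),
		attribute.Int("retry.count", 2),
		attribute.Stringer("timeout", 250*time.Millisecond), // time.Duration implements fmt.Stringer
	}
	for _, kv := range kvs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
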
diff --git a/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go
new file mode 100644
index 000000000..5791c6e7a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "math"
+)
+
+func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func rawToBool(r uint64) bool {
+ return r != 0
+}
+
+func int64ToRaw(i int64) uint64 {
+ // Assumes original was a valid int64 (overflow not checked).
+ return uint64(i) // nolint: gosec
+}
+
+func rawToInt64(r uint64) int64 {
+ // Assumes original was a valid int64 (overflow not checked).
+ return int64(r) // nolint: gosec
+}
+
+func float64ToRaw(f float64) uint64 {
+ return math.Float64bits(f)
+}
+
+func rawToFloat64(r uint64) float64 {
+ return math.Float64frombits(r)
+}
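
These helpers pack every scalar into a single uint64 so Value needs only one numeric field. The float64 case relies on the bit-level round trip being lossless; a standalone check (the helpers themselves are unexported):

package main

import (
	"fmt"
	"math"
)

func main() {
	f := 3.75
	raw := math.Float64bits(f)                  // what float64ToRaw stores
	fmt.Println(math.Float64frombits(raw) == f) // true: the round trip is exact
}
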
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 000000000..6cbefcead
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,411 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "cmp"
+ "encoding/json"
+ "reflect"
+ "slices"
+ "sort"
+)
+
+type (
+ // Set is the representation for a distinct attribute set. It manages an
+ // immutable set of attributes, with an internal cache for storing
+ // attribute encodings.
+ //
+ // This type will remain comparable for backwards compatibility. The
+ // equivalence of Sets across versions is not guaranteed to be stable.
+ // Prior versions may find two Sets to be equal or not when compared
+ // directly (i.e. ==), but subsequent versions may not. Users should use
+ // the Equals method to ensure stable equivalence checking.
+ //
+ // Users should also use the Distinct returned from Equivalent as a map key
+ // instead of a Set directly. In addition to that type providing guarantees
+ // on stable equivalence, it may also provide performance improvements.
+ Set struct {
+ equivalent Distinct
+ }
+
+ // Distinct is a unique identifier of a Set.
+ //
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+ // should always be used as a map key instead of a Set.
+ Distinct struct {
+ iface interface{}
+ }
+
+ // Sortable implements sort.Interface, used for sorting KeyValue.
+ //
+ // Deprecated: This type is no longer used. It was added as a performance
+ // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+ // longer supported by the module).
+ Sortable []KeyValue
+)
+
+var (
+ // keyValueType is used in computeDistinctReflect.
+ keyValueType = reflect.TypeOf(KeyValue{})
+
+ // emptySet is returned for empty attribute sets.
+ emptySet = &Set{
+ equivalent: Distinct{
+ iface: [0]KeyValue{},
+ },
+ }
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is provided as a convenience to avoid allocating a new empty Set.
+func EmptySet() *Set {
+ return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+ return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+ return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+ if l == nil || !l.equivalent.Valid() {
+ return 0
+ }
+ return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+ if l == nil || !l.equivalent.Valid() {
+ return KeyValue{}, false
+ }
+ value := l.equivalent.reflectValue()
+
+ if idx >= 0 && idx < value.Len() {
+ // Note: The Go compiler successfully avoids an allocation for
+ // the interface{} conversion here:
+ return value.Index(idx).Interface().(KeyValue), true
+ }
+
+ return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) {
+ if l == nil || !l.equivalent.Valid() {
+ return Value{}, false
+ }
+ rValue := l.equivalent.reflectValue()
+ vlen := rValue.Len()
+
+ idx := sort.Search(vlen, func(idx int) bool {
+ return rValue.Index(idx).Interface().(KeyValue).Key >= k
+ })
+ if idx >= vlen {
+ return Value{}, false
+ }
+ keyValue := rValue.Index(idx).Interface().(KeyValue)
+ if k == keyValue.Key {
+ return keyValue.Value, true
+ }
+ return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+ if l == nil {
+ return false
+ }
+ _, ok := l.Value(k)
+ return ok
+}
+
+// Iter returns an iterator for visiting the attributes in this set.
+func (l *Set) Iter() Iterator {
+ return Iterator{
+ storage: l,
+ idx: -1,
+ }
+}
+
+// ToSlice returns the set of attributes belonging to this set, sorted, where
+// keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+ iter := l.Iter()
+ return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The Distinct type
+// guarantees that the result will equal the equivalent Distinct value of any
+// attribute set with the same elements as this one, where sets are made
+// unique by choosing the last value in the input for any given key.
+func (l *Set) Equivalent() Distinct {
+ if l == nil || !l.equivalent.Valid() {
+ return emptySet.equivalent
+ }
+ return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+ return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to encoder.
+func (l *Set) Encoded(encoder Encoder) string {
+ if l == nil || encoder == nil {
+ return ""
+ }
+
+ return encoder.Encode(l.Iter())
+}
+
+func empty() Set {
+ return Set{
+ equivalent: emptySet.equivalent,
+ }
+}
+
+// NewSet returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// Except for empty sets, this method adds an additional allocation compared
+// with calls that include a Sortable.
+func NewSet(kvs ...KeyValue) Set {
+ s, _ := NewSetWithFiltered(kvs, nil)
+ return s
+}
+
+// NewSetWithSortable returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Sortable option as a memory optimization.
+//
+// Deprecated: Use [NewSet] instead.
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
+ s, _ := NewSetWithFiltered(kvs, nil)
+ return s
+}
+
+// NewSetWithFiltered returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Filter to include/exclude attributes in the
+// return value. Excluded attributes are returned as a slice of KeyValues.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+ // Check for empty set.
+ if len(kvs) == 0 {
+ return empty(), nil
+ }
+
+ // Stable sort so the following de-duplication can implement
+ // last-value-wins semantics.
+ slices.SortStableFunc(kvs, func(a, b KeyValue) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ position := len(kvs) - 1
+ offset := position - 1
+
+ // The requirements stated above require that the stable
+	// result be placed at the end of the input slice, while
+ // overwritten values are swapped to the beginning.
+ //
+ // De-duplicate with last-value-wins semantics. Preserve
+ // duplicate values at the beginning of the input slice.
+ for ; offset >= 0; offset-- {
+ if kvs[offset].Key == kvs[position].Key {
+ continue
+ }
+ position--
+ kvs[offset], kvs[position] = kvs[position], kvs[offset]
+ }
+ kvs = kvs[position:]
+
+ if filter != nil {
+ if div := filteredToFront(kvs, filter); div != 0 {
+ return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ }
+ }
+ return Set{equivalent: computeDistinct(kvs)}, nil
+}
+
+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+//   - Repeated calls preserve last-value-wins semantics.
+//
+// Note that methods are defined on Set, although this returns Set. Callers
+// can avoid memory allocations by:
+//
+// - allocating a Sortable for use as a temporary in this method
+// - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
+//
+// Deprecated: Use [NewSetWithFiltered] instead.
+func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) {
+ return NewSetWithFiltered(kvs, filter)
+}
+
+// filteredToFront filters slice in place using the keep function. All
+// KeyValues that need to be removed are moved to the front. All KeyValues
+// that need to be kept are moved (in order) to the back. The index of the
+// first KeyValue to be kept is returned.
+func filteredToFront(slice []KeyValue, keep Filter) int {
+ n := len(slice)
+ j := n
+ for i := n - 1; i >= 0; i-- {
+ if keep(slice[i]) {
+ j--
+ slice[i], slice[j] = slice[j], slice[i]
+ }
+ }
+ return j
+}
+
+// Filter returns a filtered copy of this Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+func (l *Set) Filter(re Filter) (Set, []KeyValue) {
+ if re == nil {
+ return *l, nil
+ }
+
+ // Iterate in reverse to the first attribute that will be filtered out.
+ n := l.Len()
+ first := n - 1
+ for ; first >= 0; first-- {
+ kv, _ := l.Get(first)
+ if !re(kv) {
+ break
+ }
+ }
+
+ // No attributes will be dropped, return the immutable Set l and nil.
+ if first < 0 {
+ return *l, nil
+ }
+
+ // Copy now that we know we need to return a modified set.
+ //
+ // Do not do this in-place on the underlying storage of *Set l. Sets are
+ // immutable and filtering should not change this.
+ slice := l.ToSlice()
+
+ // Don't re-iterate the slice if only slice[0] is filtered.
+ if first == 0 {
+ // It is safe to assume len(slice) >= 1 given we found at least one
+ // attribute above that needs to be filtered out.
+ return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ }
+
+ // Move the filtered slice[first] to the front (preserving order).
+ kv := slice[first]
+ copy(slice[1:first+1], slice[:first])
+ slice[0] = kv
+
+ // Do not re-evaluate re(slice[first+1:]).
+ div := filteredToFront(slice[1:first+1], re) + 1
+ return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
+}
+
+// computeDistinct returns a Distinct using either the fixed- or
+// reflect-oriented code path, depending on the size of the input. The input
+// slice is assumed to already be sorted and de-duplicated.
+func computeDistinct(kvs []KeyValue) Distinct {
+ iface := computeDistinctFixed(kvs)
+ if iface == nil {
+ iface = computeDistinctReflect(kvs)
+ }
+ return Distinct{
+ iface: iface,
+ }
+}
+
+// computeDistinctFixed computes a Distinct for small slices. It returns nil
+// if the input is too large for this code path.
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+ switch len(kvs) {
+ case 1:
+ return [1]KeyValue(kvs)
+ case 2:
+ return [2]KeyValue(kvs)
+ case 3:
+ return [3]KeyValue(kvs)
+ case 4:
+ return [4]KeyValue(kvs)
+ case 5:
+ return [5]KeyValue(kvs)
+ case 6:
+ return [6]KeyValue(kvs)
+ case 7:
+ return [7]KeyValue(kvs)
+ case 8:
+ return [8]KeyValue(kvs)
+ case 9:
+ return [9]KeyValue(kvs)
+ case 10:
+ return [10]KeyValue(kvs)
+ default:
+ return nil
+ }
+}
+
+// computeDistinctReflect computes a Distinct using reflection, works for any
+// size input.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+ at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+ for i, keyValue := range kvs {
+ *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+ }
+ return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the Set.
+func (l *Set) MarshalJSON() ([]byte, error) {
+ return json.Marshal(l.equivalent.iface)
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
+func (l Set) MarshalLog() interface{} {
+ kvs := make(map[string]string)
+ for _, kv := range l.ToSlice() {
+ kvs[string(kv.Key)] = kv.Value.Emit()
+ }
+ return kvs
+}
+
+// Len implements sort.Interface.
+func (l *Sortable) Len() int {
+ return len(*l)
+}
+
+// Swap implements sort.Interface.
+func (l *Sortable) Swap(i, j int) {
+ (*l)[i], (*l)[j] = (*l)[j], (*l)[i]
+}
+
+// Less implements sort.Interface.
+func (l *Sortable) Less(i, j int) bool {
+ return (*l)[i].Key < (*l)[j].Key
+}
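
A sketch of the construction semantics defined above: duplicate keys resolve last-value-wins, the encoded output is key-sorted, and Distinct (not Set) is the stable identity to use as a map key. The DefaultEncoder accessor is assumed from encoder.go:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	s := attribute.NewSet(
		attribute.String("env", "dev"),
		attribute.Int("replicas", 1),
		attribute.String("env", "prod"), // duplicate key: last value wins
	)
	fmt.Println(s.Encoded(attribute.DefaultEncoder())) // env=prod,replicas=1

	// Distinct provides stable equivalence across module versions.
	seen := map[attribute.Distinct]bool{s.Equivalent(): true}
	fmt.Println(len(seen)) // 1
}
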
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
new file mode 100644
index 000000000..e584b2477
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -0,0 +1,31 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package attribute
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[INVALID-0]
+ _ = x[BOOL-1]
+ _ = x[INT64-2]
+ _ = x[FLOAT64-3]
+ _ = x[STRING-4]
+ _ = x[BOOLSLICE-5]
+ _ = x[INT64SLICE-6]
+ _ = x[FLOAT64SLICE-7]
+ _ = x[STRINGSLICE-8]
+}
+
+const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
+
+var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
+
+func (i Type) String() string {
+ if i < 0 || i >= Type(len(_Type_index)-1) {
+ return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
new file mode 100644
index 000000000..817eecacf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -0,0 +1,270 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ attribute "go.opentelemetry.io/otel/attribute/internal"
+)
+
+//go:generate stringer -type=Type
+
+// Type describes the type of the data Value holds.
+type Type int // nolint: revive // redefines builtin Type.
+
+// Value represents the value part in key-value pairs.
+type Value struct {
+ vtype Type
+ numeric uint64
+ stringly string
+ slice interface{}
+}
+
+const (
+ // INVALID is used for a Value with no value set.
+ INVALID Type = iota
+ // BOOL is a boolean Type Value.
+ BOOL
+ // INT64 is a 64-bit signed integral Type Value.
+ INT64
+ // FLOAT64 is a 64-bit floating point Type Value.
+ FLOAT64
+ // STRING is a string Type Value.
+ STRING
+ // BOOLSLICE is a slice of booleans Type Value.
+ BOOLSLICE
+ // INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
+ INT64SLICE
+ // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
+ FLOAT64SLICE
+ // STRINGSLICE is a slice of strings Type Value.
+ STRINGSLICE
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+ return Value{
+ vtype: BOOL,
+ numeric: boolToRaw(v),
+ }
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+ return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+ return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value.
+func IntSliceValue(v []int) Value {
+ var int64Val int64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+ for i, val := range v {
+ cp.Elem().Index(i).SetInt(int64(val))
+ }
+ return Value{
+ vtype: INT64SLICE,
+ slice: cp.Elem().Interface(),
+ }
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+ return Value{
+ vtype: INT64,
+ numeric: int64ToRaw(v),
+ }
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+ return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+ return Value{
+ vtype: FLOAT64,
+ numeric: float64ToRaw(v),
+ }
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+ return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+ return Value{
+ vtype: STRING,
+ stringly: v,
+ }
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+ return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+ return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+ return rawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. Make sure that the Value's type is
+// BOOLSLICE.
+func (v Value) AsBoolSlice() []bool {
+ if v.vtype != BOOLSLICE {
+ return nil
+ }
+ return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
+ return attribute.AsBoolSlice(v.slice)
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+ return rawToInt64(v.numeric)
+}
+
+// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
+// INT64SLICE.
+func (v Value) AsInt64Slice() []int64 {
+ if v.vtype != INT64SLICE {
+ return nil
+ }
+ return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
+ return attribute.AsInt64Slice(v.slice)
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+ return rawToFloat64(v.numeric)
+}
+
+// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
+// FLOAT64SLICE.
+func (v Value) AsFloat64Slice() []float64 {
+ if v.vtype != FLOAT64SLICE {
+ return nil
+ }
+ return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
+ return attribute.AsFloat64Slice(v.slice)
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
+func (v Value) AsString() string {
+ return v.stringly
+}
+
+// AsStringSlice returns the []string value. Make sure that the Value's type is
+// STRINGSLICE.
+func (v Value) AsStringSlice() []string {
+ if v.vtype != STRINGSLICE {
+ return nil
+ }
+ return v.asStringSlice()
+}
+
+func (v Value) asStringSlice() []string {
+ return attribute.AsStringSlice(v.slice)
+}
+
+type unknownValueType struct{}
+
+// AsInterface returns Value's data as interface{}.
+func (v Value) AsInterface() interface{} {
+ switch v.Type() {
+ case BOOL:
+ return v.AsBool()
+ case BOOLSLICE:
+ return v.asBoolSlice()
+ case INT64:
+ return v.AsInt64()
+ case INT64SLICE:
+ return v.asInt64Slice()
+ case FLOAT64:
+ return v.AsFloat64()
+ case FLOAT64SLICE:
+ return v.asFloat64Slice()
+ case STRING:
+ return v.stringly
+ case STRINGSLICE:
+ return v.asStringSlice()
+ }
+ return unknownValueType{}
+}
+
+// Emit returns a string representation of Value's data.
+func (v Value) Emit() string {
+ switch v.Type() {
+ case BOOLSLICE:
+ return fmt.Sprint(v.asBoolSlice())
+ case BOOL:
+ return strconv.FormatBool(v.AsBool())
+ case INT64SLICE:
+ j, err := json.Marshal(v.asInt64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+ }
+ return string(j)
+ case INT64:
+ return strconv.FormatInt(v.AsInt64(), 10)
+ case FLOAT64SLICE:
+ j, err := json.Marshal(v.asFloat64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+ }
+ return string(j)
+ case FLOAT64:
+ return fmt.Sprint(v.AsFloat64())
+ case STRINGSLICE:
+ j, err := json.Marshal(v.asStringSlice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asStringSlice())
+ }
+ return string(j)
+ case STRING:
+ return v.stringly
+ default:
+ return "unknown"
+ }
+}
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+ var jsonVal struct {
+ Type string
+ Value interface{}
+ }
+ jsonVal.Type = v.Type().String()
+ jsonVal.Value = v.AsInterface()
+ return json.Marshal(jsonVal)
+}
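
A sketch of the accessors and Emit per type: slice types emit JSON, scalars use strconv/fmt, and a mismatched slice accessor returns nil rather than panicking:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.Int64SliceValue([]int64{1, 2, 3})
	fmt.Println(v.Type())         // INT64SLICE
	fmt.Println(v.Emit())         // [1,2,3]
	fmt.Println(v.AsInt64Slice()) // [1 2 3]

	b := attribute.BoolValue(true)
	fmt.Println(b.Emit())                 // true
	fmt.Println(b.AsStringSlice() == nil) // true: wrong-type accessor is nil
}
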
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
new file mode 100644
index 000000000..7d798435e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/README.md
@@ -0,0 +1,3 @@
+# Baggage
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 000000000..0e1fe2422
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,1018 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+ maxMembers = 180
+ maxBytesPerMembers = 4096
+ maxBytesPerBaggageString = 8192
+
+ listDelimiter = ","
+ keyValueDelimiter = "="
+ propertyDelimiter = ";"
+)
+
+var (
+ errInvalidKey = errors.New("invalid key")
+ errInvalidValue = errors.New("invalid value")
+ errInvalidProperty = errors.New("invalid baggage list-member property")
+ errInvalidMember = errors.New("invalid baggage list-member")
+ errMemberNumber = errors.New("too many list-members in baggage-string")
+ errMemberBytes = errors.New("list-member too large")
+ errBaggageBytes = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+ key, value string
+
+	// hasValue distinguishes a Property whose value is the zero-value (an
+	// empty string) from a Property that has no value at all.
+ hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// The passed key must be a valid, non-empty UTF-8 string.
+// If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property keys.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended for use as Property keys.
+func NewKeyProperty(key string) (Property, error) {
+ if !validateBaggageName(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ p := Property{key: key}
+ return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// The passed key must be compliant with W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead
+// that does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+ if !validateKey(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ if !validateValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property keys.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended for use as Property keys.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+ if !validateBaggageName(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+ if !validateBaggageValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+
+ p := Property{
+ key: key,
+ value: value,
+ hasValue: true,
+ }
+ return p, nil
+}
+
+func newInvalidProperty() Property {
+ return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+ if property == "" {
+ return newInvalidProperty(), nil
+ }
+
+ p, ok := parsePropertyInternal(property)
+ if !ok {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+ }
+
+ return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+ errFunc := func(err error) error {
+ return fmt.Errorf("invalid property: %w", err)
+ }
+
+ if !validateBaggageName(p.key) {
+ return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+ }
+ if !p.hasValue && p.value != "" {
+ return errFunc(errors.New("inconsistent value"))
+ }
+ if p.hasValue && !validateBaggageValue(p.value) {
+ return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+ }
+ return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+ return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean is returned
+// indicating whether the Property has a value: it distinguishes a Property
+// whose value is empty from one whose value was never set.
+func (p Property) Value() (string, bool) {
+ return p.value, p.hasValue
+}
+
+// String encodes Property into a header string compliant with the W3C Baggage
+// specification.
+// It returns an empty string if the key is invalid according to the W3C
+// Baggage specification. This can happen for a UTF-8 key that contains
+// invalid characters.
+func (p Property) String() string {
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(p.key) {
+ return ""
+ }
+
+ if p.hasValue {
+ return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
+ }
+ return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+ if len(iProps) == 0 {
+ return nil
+ }
+
+ props := make(properties, len(iProps))
+ for i, p := range iProps {
+ props[i] = Property{
+ key: p.Key,
+ value: p.Value,
+ hasValue: p.HasValue,
+ }
+ }
+ return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+ if len(p) == 0 {
+ return nil
+ }
+
+ iProps := make([]baggage.Property, len(p))
+ for i, prop := range p {
+ iProps[i] = baggage.Property{
+ Key: prop.key,
+ Value: prop.value,
+ HasValue: prop.hasValue,
+ }
+ }
+ return iProps
+}
+
+func (p properties) Copy() properties {
+ if len(p) == 0 {
+ return nil
+ }
+
+ props := make(properties, len(p))
+ copy(props, p)
+ return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+ for _, prop := range p {
+ if err := prop.validate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// String encodes properties into a header string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+ props := make([]string, 0, len(p))
+ for _, prop := range p {
+ s := prop.String()
+
+		// Ignore empty properties.
+ if s != "" {
+ props = append(props, s)
+ }
+ }
+ return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+ key, value string
+ properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid; no other check is
+	// required.
+ hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead
+// that does not require percent-encoding of the value.
+func NewMember(key, value string, props ...Property) (Member, error) {
+ if !validateKey(key) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ if !validateValue(value) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on baggage keys.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended for use as baggage keys.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
+ m := Member{
+ key: key,
+ value: value,
+ properties: properties(props).Copy(),
+ hasData: true,
+ }
+ if err := m.validate(); err != nil {
+ return newInvalidMember(), err
+ }
+ return m, nil
+}
+
+func newInvalidMember() Member {
+ return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+ if n := len(member); n > maxBytesPerMembers {
+ return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+ }
+
+ var props properties
+ keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+ if found {
+ // Parse the member properties.
+ for _, pStr := range strings.Split(properties, propertyDelimiter) {
+ p, err := parseProperty(pStr)
+ if err != nil {
+ return newInvalidMember(), err
+ }
+ props = append(props, p)
+ }
+ }
+ // Parse the member key/value pair.
+
+ // Take into account a value can contain equal signs (=).
+ k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+ if !found {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+ }
+ // "Leading and trailing whitespaces are allowed but MUST be trimmed
+ // when converting the header into a data structure."
+ key := strings.TrimSpace(k)
+ if !validateKey(key) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ rawVal := strings.TrimSpace(v)
+ if !validateValue(rawVal) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
+ }
+
+ // Decode a percent-encoded value.
+ unescapeVal, err := url.PathUnescape(rawVal)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
+ }
+
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
+ return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(c int, unescapeVal string) string {
+ if utf8.ValidString(unescapeVal) {
+ return unescapeVal
+ }
+ // W3C baggage spec:
+ // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+ var b strings.Builder
+ b.Grow(c)
+ for i := 0; i < len(unescapeVal); {
+ r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+ if r == utf8.RuneError && size == 1 {
+ // Invalid UTF-8 sequence found, replace it with '�'
+ _, _ = b.WriteString("�")
+ } else {
+ _, _ = b.WriteRune(r)
+ }
+ i += size
+ }
+
+ return b.String()
+}
+
+// validate ensures m is a valid baggage member: the key must be a valid,
+// non-empty UTF-8 string and the value a valid UTF-8 string. An error is
+// returned otherwise.
+func (m Member) validate() error {
+ if !m.hasData {
+ return fmt.Errorf("%w: %q", errInvalidMember, m)
+ }
+
+ if !validateBaggageName(m.key) {
+ return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+ }
+ if !validateBaggageValue(m.value) {
+ return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+ }
+ return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a header string compliant with the W3C Baggage
+// specification.
+// It returns an empty string if the key is invalid according to the W3C
+// Baggage specification. This can happen for a UTF-8 key that contains
+// invalid characters.
+func (m Member) String() string {
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(m.key) {
+ return ""
+ }
+
+ s := m.key + keyValueDelimiter + valueEscape(m.value)
+ if len(m.properties) > 0 {
+ s += propertyDelimiter + m.properties.String()
+ }
+ return s
+}
+
+// Baggage is a list of baggage members representing the baggage-string as
+// defined by the W3C Baggage specification.
+type Baggage struct { //nolint:golint
+ list baggage.List
+}
+
+// New returns a new valid Baggage. It returns an error if the result would
+// exceed the limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+ if len(members) == 0 {
+ return Baggage{}, nil
+ }
+
+ b := make(baggage.List)
+ for _, m := range members {
+ if !m.hasData {
+ return Baggage{}, errInvalidMember
+ }
+
+ // OpenTelemetry resolves duplicates by last-one-wins.
+ b[m.key] = baggage.Item{
+ Value: m.value,
+ Properties: m.properties.asInternal(),
+ }
+ }
+
+ // Check member numbers after deduplication.
+ if len(b) > maxMembers {
+ return Baggage{}, errMemberNumber
+ }
+
+ bag := Baggage{b}
+ if n := len(bag.String()); n > maxBytesPerBaggageString {
+ return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+ }
+
+ return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members, but
+// conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+ if bStr == "" {
+ return Baggage{}, nil
+ }
+
+ if n := len(bStr); n > maxBytesPerBaggageString {
+ return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+ }
+
+ b := make(baggage.List)
+ for _, memberStr := range strings.Split(bStr, listDelimiter) {
+ m, err := parseMember(memberStr)
+ if err != nil {
+ return Baggage{}, err
+ }
+ // OpenTelemetry resolves duplicates by last-one-wins.
+ b[m.key] = baggage.Item{
+ Value: m.value,
+ Properties: m.properties.asInternal(),
+ }
+ }
+
+ // OpenTelemetry does not allow for duplicate list-members, but the W3C
+ // specification does. Now that we have deduplicated, ensure the baggage
+ // does not exceed list-member limits.
+ if len(b) > maxMembers {
+ return Baggage{}, errMemberNumber
+ }
+
+ return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key the returned Member will
+// be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+ v, ok := b.list[key]
+ if !ok {
+ // We do not need to worry about distinguishing between the situation
+ // where a zero-valued Member is included in the Baggage because a
+ // zero-valued Member is invalid according to the W3C Baggage
+ // specification (it has an empty key).
+ return newInvalidMember()
+ }
+
+ return Member{
+ key: key,
+ value: v.Value,
+ properties: fromInternalProperties(v.Properties),
+ hasData: true,
+ }
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members is not significant.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+ if len(b.list) == 0 {
+ return nil
+ }
+
+ members := make([]Member, 0, len(b.list))
+ for k, v := range b.list {
+ members = append(members, Member{
+ key: k,
+ value: v.Value,
+ properties: fromInternalProperties(v.Properties),
+ hasData: true,
+ })
+ }
+ return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+ if !member.hasData {
+ return b, errInvalidMember
+ }
+
+ n := len(b.list)
+ if _, ok := b.list[member.key]; !ok {
+ n++
+ }
+ list := make(baggage.List, n)
+
+ for k, v := range b.list {
+ // Do not copy if we are just going to overwrite.
+ if k == member.key {
+ continue
+ }
+ list[k] = v
+ }
+
+ list[member.key] = baggage.Item{
+ Value: member.value,
+ Properties: member.properties.asInternal(),
+ }
+
+ return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
+func (b Baggage) DeleteMember(key string) Baggage {
+ n := len(b.list)
+ if _, ok := b.list[key]; ok {
+ n--
+ }
+ list := make(baggage.List, n)
+
+ for k, v := range b.list {
+ if k == key {
+ continue
+ }
+ list[k] = v
+ }
+
+ return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+ return len(b.list)
+}
+
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
+// Members whose keys are invalid according to the W3C Baggage specification
+// are omitted. This can happen for a UTF-8 key that contains invalid
+// characters.
+func (b Baggage) String() string {
+ members := make([]string, 0, len(b.list))
+ for k, v := range b.list {
+ s := Member{
+ key: k,
+ value: v.Value,
+ properties: fromInternalProperties(v.Properties),
+ }.String()
+
+		// Ignore empty members.
+ if s != "" {
+ members = append(members, s)
+ }
+ }
+ return strings.Join(members, listDelimiter)
+}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+ // For the entire function we will use " key = value " as an example.
+ // Attempting to parse the key.
+ // First skip spaces at the beginning "< >key = value " (they could be empty).
+ index := skipSpace(s, 0)
+
+ // Parse the key: " = value ".
+ keyStart := index
+ keyEnd := index
+ for _, c := range s[keyStart:] {
+ if !validateKeyChar(c) {
+ break
+ }
+ keyEnd++
+ }
+
+ // If we couldn't find any valid key character,
+ // it means the key is either empty or invalid.
+ if keyStart == keyEnd {
+ return
+ }
+
+ // Skip spaces after the key: " key< >= value ".
+ index = skipSpace(s, keyEnd)
+
+ if index == len(s) {
+ // A key can have no value, like: " key ".
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ return
+ }
+
+ // If we have not reached the end and we can't find the '=' delimiter,
+ // it means the property is invalid.
+ if s[index] != keyValueDelimiter[0] {
+ return
+ }
+
+ // Attempting to parse the value.
+ // Match: " key =< >value ".
+ index = skipSpace(s, index+1)
+
+ // Match the value string: " key = ".
+ // A valid property can be: " key =".
+ // Therefore, we don't have to check if the value is empty.
+ valueStart := index
+ valueEnd := index
+ for _, c := range s[valueStart:] {
+ if !validateValueChar(c) {
+ break
+ }
+ valueEnd++
+ }
+
+ // Skip all trailing whitespaces: " key = value< >".
+ index = skipSpace(s, valueEnd)
+
+ // If after looking for the value and skipping whitespaces
+ // we have not reached the end, it means the property is
+ // invalid, something like: " key = value value1".
+ if index != len(s) {
+ return
+ }
+
+ // Decode a percent-encoded value.
+ rawVal := s[valueStart:valueEnd]
+ unescapeVal, err := url.PathUnescape(rawVal)
+ if err != nil {
+ return
+ }
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
+
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ p.hasValue = true
+
+ p.value = value
+ return
+}
+
+func skipSpace(s string, offset int) int {
+ i := offset
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c != ' ' && c != '\t' {
+ break
+ }
+ }
+ return i
+}
+
+var safeKeyCharset = [utf8.RuneSelf]bool{
+ // 0x23 to 0x27
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+
+ // 0x30 to 0x39
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+
+ // 0x41 to 0x5a
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+
+ // 0x5e to 0x7a
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+
+ // remainder
+ '!': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '|': true,
+ '~': true,
+}
+
+// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
+// A baggage name is a valid, non-empty UTF-8 string.
+func validateBaggageName(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ return utf8.ValidString(s)
+}
+
+// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
+// A baggage value is a valid UTF-8 string; the empty string is also valid.
+func validateBaggageValue(s string) bool {
+ return utf8.ValidString(s)
+}
+
+// validateKey checks if the string is a valid W3C Baggage key.
+func validateKey(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ for _, c := range s {
+ if !validateKeyChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func validateKeyChar(c int32) bool {
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
+}
+
+// validateValue checks if the string is a valid W3C Baggage value.
+func validateValue(s string) bool {
+ for _, c := range s {
+ if !validateValueChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+var safeValueCharset = [utf8.RuneSelf]bool{
+ '!': true, // 0x21
+
+ // 0x23 to 0x2b
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+
+ // 0x2d to 0x3a
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+
+ // 0x3c to 0x5b
+ '<': true, // 0x3C
+ '=': true, // 0x3D
+ '>': true, // 0x3E
+ '?': true, // 0x3F
+ '@': true, // 0x40
+ 'A': true, // 0x41
+ 'B': true, // 0x42
+ 'C': true, // 0x43
+ 'D': true, // 0x44
+ 'E': true, // 0x45
+ 'F': true, // 0x46
+ 'G': true, // 0x47
+ 'H': true, // 0x48
+ 'I': true, // 0x49
+ 'J': true, // 0x4A
+ 'K': true, // 0x4B
+ 'L': true, // 0x4C
+ 'M': true, // 0x4D
+ 'N': true, // 0x4E
+ 'O': true, // 0x4F
+ 'P': true, // 0x50
+ 'Q': true, // 0x51
+ 'R': true, // 0x52
+ 'S': true, // 0x53
+ 'T': true, // 0x54
+ 'U': true, // 0x55
+ 'V': true, // 0x56
+ 'W': true, // 0x57
+ 'X': true, // 0x58
+ 'Y': true, // 0x59
+ 'Z': true, // 0x5A
+ '[': true, // 0x5B
+
+ // 0x5d to 0x7e
+ ']': true, // 0x5D
+ '^': true, // 0x5E
+ '_': true, // 0x5F
+ '`': true, // 0x60
+ 'a': true, // 0x61
+ 'b': true, // 0x62
+ 'c': true, // 0x63
+ 'd': true, // 0x64
+ 'e': true, // 0x65
+ 'f': true, // 0x66
+ 'g': true, // 0x67
+ 'h': true, // 0x68
+ 'i': true, // 0x69
+ 'j': true, // 0x6A
+ 'k': true, // 0x6B
+ 'l': true, // 0x6C
+ 'm': true, // 0x6D
+ 'n': true, // 0x6E
+ 'o': true, // 0x6F
+ 'p': true, // 0x70
+ 'q': true, // 0x71
+ 'r': true, // 0x72
+ 's': true, // 0x73
+ 't': true, // 0x74
+ 'u': true, // 0x75
+ 'v': true, // 0x76
+ 'w': true, // 0x77
+ 'x': true, // 0x78
+ 'y': true, // 0x79
+ 'z': true, // 0x7A
+ '{': true, // 0x7B
+ '|': true, // 0x7C
+ '}': true, // 0x7D
+ '~': true, // 0x7E
+}
+
+func validateValueChar(c int32) bool {
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c]
+}
+
+// valueEscape escapes the string so it can be safely placed inside a baggage value,
+// replacing special characters with %XX sequences as needed.
+//
+// The implementation is based on:
+// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
+func valueEscape(s string) string {
+ hexCount := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return s
+ }
+
+ var buf [64]byte
+ var t []byte
+
+ required := len(s) + 2*hexCount
+ if required <= len(buf) {
+ t = buf[:required]
+ } else {
+ t = make([]byte, required)
+ }
+
+ j := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(s[i]) {
+ const upperhex = "0123456789ABCDEF"
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ } else {
+ t[j] = c
+ j++
+ }
+ }
+
+ return string(t)
+}
+
+// shouldEscape returns true if the specified byte should be escaped when
+// appearing in a baggage value string.
+func shouldEscape(c byte) bool {
+ if c == '%' {
+ // The percent character must be encoded so that percent-encoding can work.
+ return true
+ }
+ return !validateValueChar(int32(c))
+}
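
A minimal sketch of how the escaping pair above round-trips, using only the standard library. `escapeByte` is a hypothetical helper that mirrors the `%XX` branch of `valueEscape`:

```go
package main

import (
	"fmt"
	"net/url"
)

// escapeByte mirrors the %XX encoding branch of valueEscape.
func escapeByte(c byte) string {
	const upperhex = "0123456789ABCDEF"
	return string([]byte{'%', upperhex[c>>4], upperhex[c&15]})
}

func main() {
	// A comma is outside the baggage value charset, so it must be escaped
	// before the value is placed in a list-member.
	escaped := "a" + escapeByte(',') + "b" // "a%2Cb"
	fmt.Println(escaped)

	// Decoding uses url.PathUnescape, exactly as the parsing code above does.
	decoded, err := url.PathUnescape(escaped)
	fmt.Println(decoded, err) // a,b <nil>
}
```
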
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 000000000..a572461a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/internal/baggage"
+)
+
+// ContextWithBaggage returns a copy of parent with baggage.
+func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
+ // Delegate so any hooks for the OpenTracing bridge are handled.
+ return baggage.ContextWithList(parent, b.list)
+}
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+ // Delegate so any hooks for the OpenTracing bridge are handled.
+ return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+ // Delegate so any hooks for the OpenTracing bridge are handled.
+ return Baggage{list: baggage.ListFromContext(ctx)}
+}
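
A short usage sketch for these context helpers, assuming the public API of the same package (`Parse`, `Member`, `Len`):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	b, err := baggage.Parse("userID=42,tenant=acme")
	if err != nil {
		panic(err)
	}

	ctx := baggage.ContextWithBaggage(context.Background(), b)
	fmt.Println(baggage.FromContext(ctx).Member("tenant").Value()) // acme

	ctx = baggage.ContextWithoutBaggage(ctx)
	fmt.Println(baggage.FromContext(ctx).Len()) // 0
}
```
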
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 000000000..b51d87cab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md
new file mode 100644
index 000000000..24c52b387
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/README.md
@@ -0,0 +1,3 @@
+# Codes
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes)
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 000000000..49a35b122
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,106 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+const (
+ // Unset is the default status code.
+ Unset Code = 0
+
+ // Error indicates the operation contains an error.
+ //
+ // NOTE: The error code in OTLP is 2.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
+ Error Code = 1
+
+ // Ok indicates the operation has been validated by an application
+ // developer or operator to have completed successfully, or to contain
+ // no error.
+ //
+ // NOTE: The Ok code in OTLP is 1.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
+ Ok Code = 2
+
+ maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+ Unset: "Unset",
+ Error: "Error",
+ Ok: "Ok",
+}
+
+var strToCode = map[string]Code{
+ `"Unset"`: Unset,
+ `"Error"`: Error,
+ `"Ok"`: Ok,
+}
+
+// String returns the Code as a string.
+func (c Code) String() string {
+ return codeToStr[c]
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+//
+// This is based on the functionality in the gRPC codes package:
+// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
+func (c *Code) UnmarshalJSON(b []byte) error {
+ // From json.Unmarshaler: By convention, to approximate the behavior of
+ // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+ // a no-op.
+ if string(b) == "null" {
+ return nil
+ }
+ if c == nil {
+ return errors.New("nil receiver passed to UnmarshalJSON")
+ }
+
+ var x interface{}
+ if err := json.Unmarshal(b, &x); err != nil {
+ return err
+ }
+ switch x.(type) {
+ case string:
+ if jc, ok := strToCode[string(b)]; ok {
+ *c = jc
+ return nil
+ }
+ return fmt.Errorf("invalid code: %q", string(b))
+ case float64:
+ if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+ if ci >= maxCode {
+ return fmt.Errorf("invalid code: %q", ci)
+ }
+
+ *c = Code(ci) // nolint: gosec // Bit size of 32 check above.
+ return nil
+ }
+ return fmt.Errorf("invalid code: %q", string(b))
+ default:
+ return fmt.Errorf("invalid code: %q", string(b))
+ }
+}
+
+// MarshalJSON returns c as the JSON encoding of c.
+func (c *Code) MarshalJSON() ([]byte, error) {
+ if c == nil {
+ return []byte("null"), nil
+ }
+ str, ok := codeToStr[*c]
+ if !ok {
+ return nil, fmt.Errorf("invalid code: %d", *c)
+ }
+ return []byte(fmt.Sprintf("%q", str)), nil
+}
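
A sketch of the JSON round trip implemented above: both the string and numeric encodings are accepted on input, and marshaling goes through the pointer receiver:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

func main() {
	var c codes.Code

	_ = json.Unmarshal([]byte(`"Error"`), &c)
	fmt.Println(c) // Error

	_ = json.Unmarshal([]byte(`2`), &c) // numeric form is also accepted
	fmt.Println(c)                      // Ok

	out, _ := json.Marshal(&c) // MarshalJSON is on *Code
	fmt.Println(string(out))   // "Ok"
}
```
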
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
new file mode 100644
index 000000000..ee8db448b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package codes defines the canonical error codes used by OpenTelemetry.
+
+It conforms to [the OpenTelemetry
+specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status).
+*/
+package codes // import "go.opentelemetry.io/otel/codes"
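
The typical consumer of this package is span status reporting. A sketch, where `doWork` is a hypothetical operation used only for illustration:

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// doWork is a hypothetical operation, for illustration only.
func doWork(context.Context) error { return errors.New("boom") }

func handle(ctx context.Context) {
	span := trace.SpanFromContext(ctx) // a no-op span here, but the API is the same
	if err := doWork(ctx); err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, "work failed")
		return
	}
	span.SetStatus(codes.Ok, "")
}

func main() { handle(context.Background()) }
```
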
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
new file mode 100644
index 000000000..935bd4876
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -0,0 +1,4 @@
+# This is a renovate-friendly source of Docker images.
+FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python
+FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver
+FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
new file mode 100644
index 000000000..921f85961
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -0,0 +1,25 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otel provides global access to the OpenTelemetry API. The subpackages of
+the otel package provide an implementation of the OpenTelemetry API.
+
+The provided API is used to instrument code and measure data about that code's
+performance and operation. The measured data, by default, is not processed or
+transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
+default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
+exporters are used to process and transport this data.
+
+To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
+
+To read more about tracing, see go.opentelemetry.io/otel/trace.
+
+To read more about metrics, see go.opentelemetry.io/otel/metric.
+
+To read more about logs, see go.opentelemetry.io/otel/log.
+
+To read more about propagation, see go.opentelemetry.io/otel/propagation and
+go.opentelemetry.io/otel/baggage.
+*/
+package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
new file mode 100644
index 000000000..67414c71e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/error_handler.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Handle handles any error deemed irremediable by an OpenTelemetry
+ // component.
+ Handle(error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// ErrorHandlerFunc is a convenience adapter to allow the use of a function
+// as an ErrorHandler.
+type ErrorHandlerFunc func(error)
+
+var _ ErrorHandler = ErrorHandlerFunc(nil)
+
+// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
+func (f ErrorHandlerFunc) Handle(err error) {
+ f(err)
+}
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 000000000..07623b679
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Compile-time check that global.ErrDelegator implements ErrorHandler.
+var _ ErrorHandler = (*global.ErrDelegator)(nil)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
+
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) { global.GetErrorHandler().Handle(err) }
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 000000000..b4f85f44a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package baggage provides base types and functionality to store and retrieve
+baggage in Go context. This package exists because the OpenTracing bridge to
+OpenTelemetry needs to synchronize state whenever baggage for a context is
+modified and that context contains an OpenTracing span. If it were not for
+this need this package would not need to exist and the
+`go.opentelemetry.io/otel/baggage` package would be the singular place where
+W3C baggage is handled.
+*/
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+// List is the collection of baggage members. The W3C allows for duplicates,
+// but OpenTelemetry does not, therefore, this is represented as a map.
+type List map[string]Item
+
+// Item is the value and metadata properties part of a list-member.
+type Item struct {
+ Value string
+ Properties []Property
+}
+
+// Property is a metadata entry for a list-member.
+type Property struct {
+ Key, Value string
+
+ // HasValue distinguishes a property that has no value from one whose
+ // value is genuinely the zero-value (an empty string).
+ HasValue bool
+}
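
For orientation, this is what the data model looks like when populated. The fragment is illustrative only, since the package is internal to the otel module and cannot be imported elsewhere:

```go
l := baggage.List{
	"userID": {
		Value: "42",
		Properties: []baggage.Property{
			{Key: "flag"},                             // property without a value: "flag"
			{Key: "ttl", Value: "60", HasValue: true}, // property with a value: "ttl=60"
		},
	},
}
_ = l
```
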
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
new file mode 100644
index 000000000..3aea9c491
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+import "context"
+
+type baggageContextKeyType int
+
+const baggageKey baggageContextKeyType = iota
+
+// SetHookFunc is a callback called when storing baggage in the context.
+type SetHookFunc func(context.Context, List) context.Context
+
+// GetHookFunc is a callback called when getting baggage from the context.
+type GetHookFunc func(context.Context, List) List
+
+type baggageState struct {
+ list List
+
+ setHook SetHookFunc
+ getHook GetHookFunc
+}
+
+// ContextWithSetHook returns a copy of parent with hook configured to be
+// invoked every time ContextWithBaggage is called.
+//
+// Passing nil SetHookFunc creates a context with no set hook to call.
+func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
+ var s baggageState
+ if v, ok := parent.Value(baggageKey).(baggageState); ok {
+ s = v
+ }
+
+ s.setHook = hook
+ return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithGetHook returns a copy of parent with hook configured to be
+// invoked every time FromContext is called.
+//
+// Passing nil GetHookFunc creates a context with no get hook to call.
+func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
+ var s baggageState
+ if v, ok := parent.Value(baggageKey).(baggageState); ok {
+ s = v
+ }
+
+ s.getHook = hook
+ return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithList returns a copy of parent with baggage. Passing nil list
+// returns a context without any baggage.
+func ContextWithList(parent context.Context, list List) context.Context {
+ var s baggageState
+ if v, ok := parent.Value(baggageKey).(baggageState); ok {
+ s = v
+ }
+
+ s.list = list
+ ctx := context.WithValue(parent, baggageKey, s)
+ if s.setHook != nil {
+ ctx = s.setHook(ctx, list)
+ }
+
+ return ctx
+}
+
+// ListFromContext returns the baggage contained in ctx.
+func ListFromContext(ctx context.Context) List {
+ switch v := ctx.Value(baggageKey).(type) {
+ case baggageState:
+ if v.getHook != nil {
+ return v.getHook(ctx, v.list)
+ }
+ return v.list
+ default:
+ return nil
+ }
+}
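
A fragment sketching the hook mechanism as a bridge (e.g. the OpenTracing bridge) would use it; again internal-only, shown for illustration:

```go
ctx := context.Background()

// The set hook fires inside ContextWithList, after the new list is stored.
ctx = baggage.ContextWithSetHook(ctx, func(ctx context.Context, l baggage.List) context.Context {
	fmt.Println("baggage updated, members:", len(l))
	return ctx
})

// Prints: baggage updated, members: 1
ctx = baggage.ContextWithList(ctx, baggage.List{"k": {Value: "v"}})
_ = ctx
```
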
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
new file mode 100644
index 000000000..2e47b2964
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package global provides the OpenTelemetry global API.
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "log"
+ "sync/atomic"
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+ // Handle handles any error deemed irremediable by an OpenTelemetry
+ // component.
+ Handle(error)
+}
+
+// ErrDelegator forwards errors to a configured delegate ErrorHandler,
+// logging with the standard library logger until a delegate is set.
+type ErrDelegator struct {
+ delegate atomic.Pointer[ErrorHandler]
+}
+
+// Compile-time check that delegator implements ErrorHandler.
+var _ ErrorHandler = (*ErrDelegator)(nil)
+
+func (d *ErrDelegator) Handle(err error) {
+ if eh := d.delegate.Load(); eh != nil {
+ (*eh).Handle(err)
+ return
+ }
+ log.Print(err)
+}
+
+// setDelegate sets the ErrorHandler delegate.
+func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
+ d.delegate.Store(&eh)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
new file mode 100644
index 000000000..ae92a4251
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -0,0 +1,412 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "context"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// unwrapper unwraps to return the underlying instrument implementation.
+type unwrapper interface {
+ unwrap() metric.Observable
+}
+
+type afCounter struct {
+ embedded.Float64ObservableCounter
+ metric.Float64Observable
+
+ name string
+ opts []metric.Float64ObservableCounterOption
+
+ delegate atomic.Value // metric.Float64ObservableCounter
+}
+
+var (
+ _ unwrapper = (*afCounter)(nil)
+ _ metric.Float64ObservableCounter = (*afCounter)(nil)
+)
+
+func (i *afCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *afCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64ObservableCounter)
+ }
+ return nil
+}
+
+type afUpDownCounter struct {
+ embedded.Float64ObservableUpDownCounter
+ metric.Float64Observable
+
+ name string
+ opts []metric.Float64ObservableUpDownCounterOption
+
+ delegate atomic.Value // metric.Float64ObservableUpDownCounter
+}
+
+var (
+ _ unwrapper = (*afUpDownCounter)(nil)
+ _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+)
+
+func (i *afUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *afUpDownCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64ObservableUpDownCounter)
+ }
+ return nil
+}
+
+type afGauge struct {
+ embedded.Float64ObservableGauge
+ metric.Float64Observable
+
+ name string
+ opts []metric.Float64ObservableGaugeOption
+
+ delegate atomic.Value // metric.Float64ObservableGauge
+}
+
+var (
+ _ unwrapper = (*afGauge)(nil)
+ _ metric.Float64ObservableGauge = (*afGauge)(nil)
+)
+
+func (i *afGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *afGauge) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64ObservableGauge)
+ }
+ return nil
+}
+
+type aiCounter struct {
+ embedded.Int64ObservableCounter
+ metric.Int64Observable
+
+ name string
+ opts []metric.Int64ObservableCounterOption
+
+ delegate atomic.Value // metric.Int64ObservableCounter
+}
+
+var (
+ _ unwrapper = (*aiCounter)(nil)
+ _ metric.Int64ObservableCounter = (*aiCounter)(nil)
+)
+
+func (i *aiCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *aiCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64ObservableCounter)
+ }
+ return nil
+}
+
+type aiUpDownCounter struct {
+ embedded.Int64ObservableUpDownCounter
+ metric.Int64Observable
+
+ name string
+ opts []metric.Int64ObservableUpDownCounterOption
+
+ delegate atomic.Value // metric.Int64ObservableUpDownCounter
+}
+
+var (
+ _ unwrapper = (*aiUpDownCounter)(nil)
+ _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+)
+
+func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *aiUpDownCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64ObservableUpDownCounter)
+ }
+ return nil
+}
+
+type aiGauge struct {
+ embedded.Int64ObservableGauge
+ metric.Int64Observable
+
+ name string
+ opts []metric.Int64ObservableGaugeOption
+
+ delegate atomic.Value // metric.Int64ObservableGauge
+}
+
+var (
+ _ unwrapper = (*aiGauge)(nil)
+ _ metric.Int64ObservableGauge = (*aiGauge)(nil)
+)
+
+func (i *aiGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *aiGauge) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64ObservableGauge)
+ }
+ return nil
+}
+
+// Sync Instruments.
+type sfCounter struct {
+ embedded.Float64Counter
+
+ name string
+ opts []metric.Float64CounterOption
+
+ delegate atomic.Value // metric.Float64Counter
+}
+
+var _ metric.Float64Counter = (*sfCounter)(nil)
+
+func (i *sfCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Counter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Counter).Add(ctx, incr, opts...)
+ }
+}
+
+type sfUpDownCounter struct {
+ embedded.Float64UpDownCounter
+
+ name string
+ opts []metric.Float64UpDownCounterOption
+
+ delegate atomic.Value // metric.Float64UpDownCounter
+}
+
+var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
+
+func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...)
+ }
+}
+
+type sfHistogram struct {
+ embedded.Float64Histogram
+
+ name string
+ opts []metric.Float64HistogramOption
+
+ delegate atomic.Value // metric.Float64Histogram
+}
+
+var _ metric.Float64Histogram = (*sfHistogram)(nil)
+
+func (i *sfHistogram) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Histogram(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Histogram).Record(ctx, x, opts...)
+ }
+}
+
+type sfGauge struct {
+ embedded.Float64Gauge
+
+ name string
+ opts []metric.Float64GaugeOption
+
+ delegate atomic.Value // metric.Float64Gauge
+}
+
+var _ metric.Float64Gauge = (*sfGauge)(nil)
+
+func (i *sfGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Gauge).Record(ctx, x, opts...)
+ }
+}
+
+type siCounter struct {
+ embedded.Int64Counter
+
+ name string
+ opts []metric.Int64CounterOption
+
+ delegate atomic.Value // metric.Int64Counter
+}
+
+var _ metric.Int64Counter = (*siCounter)(nil)
+
+func (i *siCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Counter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Counter).Add(ctx, x, opts...)
+ }
+}
+
+type siUpDownCounter struct {
+ embedded.Int64UpDownCounter
+
+ name string
+ opts []metric.Int64UpDownCounterOption
+
+ delegate atomic.Value // metric.Int64UpDownCounter
+}
+
+var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
+
+func (i *siUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...)
+ }
+}
+
+type siHistogram struct {
+ embedded.Int64Histogram
+
+ name string
+ opts []metric.Int64HistogramOption
+
+ delegate atomic.Value // metric.Int64Histogram
+}
+
+var _ metric.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Histogram(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
+ }
+}
+
+type siGauge struct {
+ embedded.Int64Gauge
+
+ name string
+ opts []metric.Int64GaugeOption
+
+ delegate atomic.Value // metric.Int64Gauge
+}
+
+var _ metric.Int64Gauge = (*siGauge)(nil)
+
+func (i *siGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
+ }
+}
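
The effect of this delegation machinery, sketched from the caller's side. `sdkmetric` stands in for the real SDK package (go.opentelemetry.io/otel/sdk/metric), which is not part of this diff:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	ctx := context.Background()

	// Created before any SDK is installed: the global API hands back a
	// placeholder (siCounter above) and Add is a no-op for now.
	counter, _ := otel.Meter("example").Int64Counter("requests")
	counter.Add(ctx, 1)

	// After otel.SetMeterProvider(sdkmetric.NewMeterProvider()) the
	// placeholder delegates, and subsequent Add calls are recorded.
	counter.Add(ctx, 1)
}
```
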
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 000000000..adbca7d34
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,62 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "log"
+ "os"
+ "sync/atomic"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+)
+
+// globalLogger holds a reference to the [logr.Logger] used within
+// go.opentelemetry.io/otel.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// type. This logger will only show messages at the Error level.
+var globalLogger = func() *atomic.Pointer[logr.Logger] {
+ l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+
+ p := new(atomic.Pointer[logr.Logger])
+ p.Store(&l)
+ return p
+}()
+
+// SetLogger sets the global Logger to l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`.
+// To see Info messages use a logger with `l.V(4).Enabled() == true`.
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+ globalLogger.Store(&l)
+}
+
+// GetLogger returns the global logger.
+func GetLogger() logr.Logger {
+ return *globalLogger.Load()
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+ GetLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+ GetLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+ GetLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+ GetLogger().V(1).Info(msg, keysAndValues...)
+}
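
A sketch of raising the verbosity so Warn/Info/Debug messages become visible, using the same stdr backend as the default logger above:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// stdr verbosity gates the V-levels used above:
	// 1 enables Warn, 4 enables Info, 8 enables Debug.
	stdr.SetVerbosity(8)
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
}
```
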
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
new file mode 100644
index 000000000..adb37b5b0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -0,0 +1,625 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "container/list"
+ "context"
+ "reflect"
+ "sync"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// meterProvider is a placeholder for a configured SDK MeterProvider.
+//
+// All MeterProvider functionality is forwarded to a delegate once
+// configured.
+type meterProvider struct {
+ embedded.MeterProvider
+
+ mtx sync.Mutex
+ meters map[il]*meter
+
+ delegate metric.MeterProvider
+}
+
+// setDelegate configures p to delegate all MeterProvider functionality to
+// provider.
+//
+// All Meters provided prior to this function call are switched out to be
+// Meters provided by provider. All instruments and callbacks are recreated and
+// delegated.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ p.delegate = provider
+
+ if len(p.meters) == 0 {
+ return
+ }
+
+ for _, meter := range p.meters {
+ meter.setDelegate(provider)
+ }
+
+ p.meters = nil
+}
+
+// Meter implements MeterProvider.
+func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ if p.delegate != nil {
+ return p.delegate.Meter(name, opts...)
+ }
+
+ // At this moment it is guaranteed that no SDK is installed; save the meter in the meters map.
+
+ c := metric.NewMeterConfig(opts...)
+ key := il{
+ name: name,
+ version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
+ attrs: c.InstrumentationAttributes(),
+ }
+
+ if p.meters == nil {
+ p.meters = make(map[il]*meter)
+ }
+
+ if val, ok := p.meters[key]; ok {
+ return val
+ }
+
+ t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
+ p.meters[key] = t
+ return t
+}
+
+// meter is a placeholder for a metric.Meter.
+//
+// All Meter functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopMeter.
+type meter struct {
+ embedded.Meter
+
+ name string
+ opts []metric.MeterOption
+
+ mtx sync.Mutex
+ instruments map[instID]delegatedInstrument
+
+ registry list.List
+
+ delegate metric.Meter
+}
+
+type delegatedInstrument interface {
+ setDelegate(metric.Meter)
+}
+
+// instID holds the identifying properties of an instrument.
+type instID struct {
+ // name is the name of the stream.
+ name string
+ // description is the description of the stream.
+ description string
+ // kind defines the functional group of the instrument.
+ kind reflect.Type
+ // unit is the unit of the stream.
+ unit string
+}
+
+// setDelegate configures m to delegate all Meter functionality to Meters
+// created by provider.
+//
+// All subsequent calls to the Meter methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (m *meter) setDelegate(provider metric.MeterProvider) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ meter := provider.Meter(m.name, m.opts...)
+ m.delegate = meter
+
+ for _, inst := range m.instruments {
+ inst.setDelegate(meter)
+ }
+
+ var n *list.Element
+ for e := m.registry.Front(); e != nil; e = n {
+ r := e.Value.(*registration)
+ r.setDelegate(meter)
+ n = e.Next()
+ m.registry.Remove(e)
+ }
+
+ m.instruments = nil
+ m.registry.Init()
+}
+
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Counter(name, options...)
+ }
+
+ cfg := metric.NewInt64CounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Counter), nil
+ }
+ i := &siCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64UpDownCounter(
+ name string,
+ options ...metric.Int64UpDownCounterOption,
+) (metric.Int64UpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64UpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64UpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64UpDownCounter), nil
+ }
+ i := &siUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Histogram(name, options...)
+ }
+
+ cfg := metric.NewInt64HistogramConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siHistogram)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Histogram), nil
+ }
+ i := &siHistogram{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Gauge(name, options...)
+ }
+
+ cfg := metric.NewInt64GaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Gauge), nil
+ }
+ i := &siGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64ObservableCounter(
+ name string,
+ options ...metric.Int64ObservableCounterOption,
+) (metric.Int64ObservableCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableCounter), nil
+ }
+ i := &aiCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64ObservableUpDownCounter(
+ name string,
+ options ...metric.Int64ObservableUpDownCounterOption,
+) (metric.Int64ObservableUpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableUpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableUpDownCounter), nil
+ }
+ i := &aiUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64ObservableGauge(
+ name string,
+ options ...metric.Int64ObservableGaugeOption,
+) (metric.Int64ObservableGauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableGauge(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableGaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableGauge), nil
+ }
+ i := &aiGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Counter(name, options...)
+ }
+
+ cfg := metric.NewFloat64CounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Counter), nil
+ }
+ i := &sfCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64UpDownCounter(
+ name string,
+ options ...metric.Float64UpDownCounterOption,
+) (metric.Float64UpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64UpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64UpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64UpDownCounter), nil
+ }
+ i := &sfUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64Histogram(
+ name string,
+ options ...metric.Float64HistogramOption,
+) (metric.Float64Histogram, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Histogram(name, options...)
+ }
+
+ cfg := metric.NewFloat64HistogramConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfHistogram)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Histogram), nil
+ }
+ i := &sfHistogram{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Gauge(name, options...)
+ }
+
+ cfg := metric.NewFloat64GaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Gauge), nil
+ }
+ i := &sfGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64ObservableCounter(
+ name string,
+ options ...metric.Float64ObservableCounterOption,
+) (metric.Float64ObservableCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableCounter), nil
+ }
+ i := &afCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64ObservableUpDownCounter(
+ name string,
+ options ...metric.Float64ObservableUpDownCounterOption,
+) (metric.Float64ObservableUpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableUpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableUpDownCounter), nil
+ }
+ i := &afUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64ObservableGauge(
+ name string,
+ options ...metric.Float64ObservableGaugeOption,
+) (metric.Float64ObservableGauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableGauge(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableGauge), nil
+ }
+ i := &afGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+// RegisterCallback captures the function that will be called during Collect.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
+ }
+
+ reg := &registration{instruments: insts, function: f}
+ e := m.registry.PushBack(reg)
+ reg.unreg = func() error {
+ m.mtx.Lock()
+ _ = m.registry.Remove(e)
+ m.mtx.Unlock()
+ return nil
+ }
+ return reg, nil
+}
+
+func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
+ out := make([]metric.Observable, 0, len(instruments))
+
+ for _, inst := range instruments {
+ if in, ok := inst.(unwrapper); ok {
+ out = append(out, in.unwrap())
+ } else {
+ out = append(out, inst)
+ }
+ }
+
+ return out
+}
+
+type registration struct {
+ embedded.Registration
+
+ instruments []metric.Observable
+ function metric.Callback
+
+ unreg func() error
+ unregMu sync.Mutex
+}
+
+type unwrapObs struct {
+ embedded.Observer
+ obs metric.Observer
+}
+
+// unwrapFloat64Observable returns an expected metric.Float64Observable after
+// unwrapping the global object.
+func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
+ if unwrapped, ok := inst.(unwrapper); ok {
+ if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
+ // Note: if the unwrapped object does not
+ // unwrap as an observable for either of the
+ // predicates here, it means an internal bug in
+ // this package. We avoid logging an error in
+ // this case, because the SDK has to try its
+ // own type conversion on the object. The SDK
+ // will see this and be forced to respond with
+ // its own error.
+ //
+ // This code uses a double-nested if statement
+ // to avoid creating a branch that is
+ // impossible to cover.
+ inst = floatObs
+ }
+ }
+ return inst
+}
+
+// unwrapInt64Observable returns an expected metric.Int64Observable after
+// unwrapping the global object.
+func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
+ if unwrapped, ok := inst.(unwrapper); ok {
+ if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
+ // See the comment in unwrapFloat64Observable().
+ inst = unint
+ }
+ }
+ return inst
+}
+
+func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
+ uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
+}
+
+func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
+ uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
+}
+
+func unwrapCallback(f metric.Callback) metric.Callback {
+ return func(ctx context.Context, obs metric.Observer) error {
+ return f(ctx, &unwrapObs{obs: obs})
+ }
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+
+ if c.unreg == nil {
+ // Unregister already called.
+ return
+ }
+
+ reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+
+ c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+ if c.unreg == nil {
+ // Unregister already called.
+ return nil
+ }
+
+ var err error
+ err, c.unreg = c.unreg(), nil
+ return err
+}
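
A sketch of the callback path from the caller's side; `queueDepth` is a hypothetical measurement source:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// queueDepth is a hypothetical measurement source, for illustration only.
func queueDepth() int64 { return 7 }

func main() {
	meter := otel.Meter("example")
	gauge, _ := meter.Int64ObservableGauge("queue.depth")

	// Before an SDK is installed the callback is parked in the registry
	// above; setDelegate re-registers it against the real meter later.
	reg, _ := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, queueDepth())
		return nil
	}, gauge)
	defer func() { _ = reg.Unregister() }()
}
```
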
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 000000000..38560ff99
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,71 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it delegates the calls to the
+// default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+ mtx sync.Mutex
+ once sync.Once
+ delegate propagation.TextMapPropagator
+ noop propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+ return &textMapPropagator{
+ noop: propagation.NewCompositeTextMapPropagator(),
+ }
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once, all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+ if delegate == nil {
+ return
+ }
+
+ p.mtx.Lock()
+ p.once.Do(func() { p.delegate = delegate })
+ p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+ if p.delegate != nil {
+ return p.delegate
+ }
+ return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+ p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+ return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+ return p.effectiveDelegate().Fields()
+}
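
A sketch of the global propagator in use; until SetTextMapPropagator is called, Inject and Extract fall through to the no-op composite above:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, propagation.Baggage{},
	))

	// Inject writes whatever trace/baggage state the context carries into
	// the carrier (nothing here, since the context is empty).
	carrier := propagation.MapCarrier{}
	otel.GetTextMapPropagator().Inject(context.Background(), carrier)

	// Extract reads it back into a fresh context on the receiving side.
	_ = otel.GetTextMapPropagator().Extract(context.Background(), carrier)
}
```
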
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 000000000..204ea142a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type (
+ errorHandlerHolder struct {
+ eh ErrorHandler
+ }
+
+ tracerProviderHolder struct {
+ tp trace.TracerProvider
+ }
+
+ propagatorsHolder struct {
+ tm propagation.TextMapPropagator
+ }
+
+ meterProviderHolder struct {
+ mp metric.MeterProvider
+ }
+)
+
+var (
+ globalErrorHandler = defaultErrorHandler()
+ globalTracer = defaultTracerValue()
+ globalPropagators = defaultPropagatorsValue()
+ globalMeterProvider = defaultMeterProvider()
+
+ delegateErrorHandlerOnce sync.Once
+ delegateTraceOnce sync.Once
+ delegateTextMapPropagatorOnce sync.Once
+ delegateMeterOnce sync.Once
+)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler {
+ return globalErrorHandler.Load().(errorHandlerHolder).eh
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+ current := GetErrorHandler()
+
+ if _, cOk := current.(*ErrDelegator); cOk {
+ if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
+ // Do not assign to the delegate of the default ErrDelegator to be
+ // itself.
+ Error(
+ errors.New("no ErrorHandler delegate configured"),
+ "ErrorHandler remains its current value.",
+ )
+ return
+ }
+ }
+
+ delegateErrorHandlerOnce.Do(func() {
+ if def, ok := current.(*ErrDelegator); ok {
+ def.setDelegate(h)
+ }
+ })
+ globalErrorHandler.Store(errorHandlerHolder{eh: h})
+}
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+ return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ current := TracerProvider()
+
+ if _, cOk := current.(*tracerProvider); cOk {
+ if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+ // Do not assign the default delegating TracerProvider to delegate
+ // to itself.
+ Error(
+ errors.New("no delegate configured in tracer provider"),
+ "Setting tracer provider to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ delegateTraceOnce.Do(func() {
+ if def, ok := current.(*tracerProvider); ok {
+ def.setDelegate(tp)
+ }
+ })
+ globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+ return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+ current := TextMapPropagator()
+
+ if _, cOk := current.(*textMapPropagator); cOk {
+ if _, pOk := p.(*textMapPropagator); pOk && current == p {
+ // Do not assign the default delegating TextMapPropagator to
+ // delegate to itself.
+ Error(
+ errors.New("no delegate configured in text map propagator"),
+ "Setting text map propagator to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ // For the textMapPropagator already returned by TextMapPropagator
+ // delegate to p.
+ delegateTextMapPropagatorOnce.Do(func() {
+ if def, ok := current.(*textMapPropagator); ok {
+ def.SetDelegate(p)
+ }
+ })
+ // Return p when subsequent calls to TextMapPropagator are made.
+ globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+ return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ current := MeterProvider()
+ if _, cOk := current.(*meterProvider); cOk {
+ if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+ // Do not assign the default delegating MeterProvider to delegate
+ // to itself.
+ Error(
+ errors.New("no delegate configured in meter provider"),
+ "Setting meter provider to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ delegateMeterOnce.Do(func() {
+ if def, ok := current.(*meterProvider); ok {
+ def.setDelegate(mp)
+ }
+ })
+ globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultErrorHandler() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
+ return v
+}
+
+func defaultTracerValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+ return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+ return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(meterProviderHolder{mp: &meterProvider{}})
+ return v
+}
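
A sketch of the typical wiring, assuming the SDK trace package (go.opentelemetry.io/otel/sdk/trace, not part of this diff):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Swap the delegating placeholder for a real SDK provider. Tracers
	// already handed out by the global provider start delegating as well.
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()
	otel.SetTracerProvider(tp)
}
```
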
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 000000000..49e4ac4fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,231 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior; it remains a no-op
+Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/auto/sdk"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+ embedded.TracerProvider
+
+ mtx sync.Mutex
+ tracers map[il]*tracer
+ delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ p.delegate = provider
+
+ if len(p.tracers) == 0 {
+ return
+ }
+
+ for _, t := range p.tracers {
+ t.setDelegate(provider)
+ }
+
+ p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ if p.delegate != nil {
+ return p.delegate.Tracer(name, opts...)
+ }
+
+ // At this moment it is guaranteed that no SDK is installed; save the tracer in the tracers map.
+
+ c := trace.NewTracerConfig(opts...)
+ key := il{
+ name: name,
+ version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
+ attrs: c.InstrumentationAttributes(),
+ }
+
+ if p.tracers == nil {
+ p.tracers = make(map[il]*tracer)
+ }
+
+ if val, ok := p.tracers[key]; ok {
+ return val
+ }
+
+ t := &tracer{name: name, opts: opts, provider: p}
+ p.tracers[key] = t
+ return t
+}
+
+type il struct {
+ name string
+ version string
+ schema string
+ attrs attribute.Set
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+ embedded.Tracer
+
+ name string
+ opts []trace.TracerOption
+ provider *tracerProvider
+
+ delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *tracer) setDelegate(provider trace.TracerProvider) {
+ t.delegate.Store(provider.Tracer(t.name, t.opts...))
+}
+
+// Start implements trace.Tracer by forwarding the call to t.delegate if
+// set, otherwise it forwards the call to a NoopTracer.
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ delegate := t.delegate.Load()
+ if delegate != nil {
+ return delegate.(trace.Tracer).Start(ctx, name, opts...)
+ }
+
+ return t.newSpan(ctx, autoInstEnabled, name, opts)
+}
+
+// autoInstEnabled determines if the auto-instrumentation SDK span is returned
+// from the tracer when not backed by a delegate and auto-instrumentation has
+// attached to this process.
+//
+// The auto-instrumentation is expected to overwrite this value with true when
+// it attaches. Otherwise, it points to false and the tracer returns a
+// nonRecordingSpan.
+var autoInstEnabled = new(bool)
+
+// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF
+// uprobe to this code.
+//
+// "noinline" pragma prevents the method from ever being inlined.
+//
+//go:noinline
+func (t *tracer) newSpan(
+ ctx context.Context,
+ autoSpan *bool,
+ name string,
+ opts []trace.SpanStartOption,
+) (context.Context, trace.Span) {
+ // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is
+ // so the auto-instrumentation can define a uprobe for (*t).newSpan and be
+ // provided with the address of the bool autoInstEnabled points to. It
+ // needs to be a parameter so that pointer can be reliably determined; it
+ // should not be read from the global.
+
+ if *autoSpan {
+ tracer := sdk.TracerProvider().Tracer(t.name, t.opts...)
+ return tracer.Start(ctx, name, opts...)
+ }
+
+ s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
+ ctx = trace.ContextWithSpan(ctx, s)
+ return ctx, s
+}
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+ embedded.Span
+
+ sc trace.SpanContext
+ tracer *tracer
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
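
A short sketch of the behavior described above: a span started before an SDK is registered is, and remains, non-recording. Only public otel entry points are used; the tracer and span names are illustrative.

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel"
    )

    func main() {
        tr := otel.Tracer("example.com/early")

        // No SDK is registered yet, so this span is a nonRecordingSpan and
        // stays one even if an SDK is installed later.
        _, span := tr.Start(context.Background(), "bootstrap")
        defer span.End()

        fmt.Println(span.IsRecording()) // false
    }
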
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
new file mode 100644
index 000000000..6de7f2e4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "github.com/go-logr/logr"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally to opentelemetry.
+func SetLogger(logger logr.Logger) {
+ global.SetLogger(logger)
+}
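
For illustration, a sketch wiring the internal logger to the standard library via the logr stdr adapter (one of several logr implementations; the prefix string is arbitrary):

    package main

    import (
        "log"
        "os"

        "github.com/go-logr/stdr"
        "go.opentelemetry.io/otel"
    )

    func main() {
        // Route OpenTelemetry's internal diagnostics to a standard logger.
        otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
    }
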
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 000000000..1e6473b32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, then an implementation-defined default name will be
+// used instead.
+//
+// If this is called before a global MeterProvider is registered, the returned
+// Meter will be a no-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+ return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a no-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+ return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ global.SetMeterProvider(mp)
+}
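
A minimal usage sketch for the entry points above; the scope name, version, and instrument names are illustrative:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        meter := otel.Meter(
            "example.com/mylib",
            metric.WithInstrumentationVersion("0.1.0"),
        )

        counter, err := meter.Int64Counter(
            "requests.total",
            metric.WithDescription("Number of handled requests."),
            metric.WithUnit("{request}"),
        )
        if err != nil {
            panic(err)
        }
        counter.Add(context.Background(), 1)
    }
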
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/common/LICENSE
rename to vendor/go.opentelemetry.io/otel/metric/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md
new file mode 100644
index 000000000..0cf902e01
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/README.md
@@ -0,0 +1,3 @@
+# Metric API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 000000000..b7fc973a6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,266 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+ Observable
+
+ float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableCounter
+
+ Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+ var config Float64ObservableCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+ applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableUpDownCounter
+
+ Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(
+ opts ...Float64ObservableUpDownCounterOption,
+) Float64ObservableUpDownCounterConfig {
+ var config Float64ObservableUpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableUpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
+type Float64ObservableUpDownCounterOption interface {
+ applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableGauge
+
+ Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+ var config Float64ObservableGaugeConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableGauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+ applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Observer
+
+ // Observe records the float64 value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. That is, it should not report measurements with the same
+// attributes as another Float64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+ Float64ObservableCounterOption
+ Float64ObservableUpDownCounterOption
+ Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+ cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(
+ cfg Float64ObservableCounterConfig,
+) Float64ObservableCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(
+ cfg Float64ObservableUpDownCounterConfig,
+) Float64ObservableUpDownCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+// WithFloat64Callback adds the callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+ return float64CallbackOpt{callback}
+}
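
A sketch of registering a Float64Callback at instrument creation, per the criteria described in the package documentation; readTemp is a hypothetical measurement source:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func readTemp() float64 { return 21.5 } // hypothetical sensor read

    func main() {
        meter := otel.Meter("example.com/sensors")

        // The callback runs once per collection cycle.
        _, err := meter.Float64ObservableGauge(
            "room.temperature",
            metric.WithUnit("Cel"),
            metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
                o.Observe(readTemp())
                return nil
            }),
        )
        if err != nil {
            panic(err)
        }
    }
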
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 000000000..4404b71a2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,262 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+ Observable
+
+ int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64ObservableCounter
+
+ Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+ description string
+ unit string
+ callbacks []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+ var config Int64ObservableCounterConfig
+ for _, o := range opts {
+ config = o.applyInt64ObservableCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+ return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+ applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed the to
+// be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64ObservableUpDownCounter
+
+ Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+ description string
+ unit string
+ callbacks []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(
+ opts ...Int64ObservableUpDownCounterOption,
+) Int64ObservableUpDownCounterConfig {
+ var config Int64ObservableUpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyInt64ObservableUpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+ return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+ applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64ObservableGauge
+
+ Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+ description string
+ unit string
+ callbacks []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+ var config Int64ObservableGaugeConfig
+ for _, o := range opts {
+ config = o.applyInt64ObservableGauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+ return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+ applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Observer
+
+ // Observe records the int64 value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. That is, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+ Int64ObservableCounterOption
+ Int64ObservableUpDownCounterOption
+ Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+ cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(
+ cfg Int64ObservableUpDownCounterConfig,
+) Int64ObservableUpDownCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+// WithInt64Callback adds the callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
+ return int64CallbackOpt{callback}
+}
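
For contrast with WithInt64Callback, a sketch using the Meter's RegisterCallback method, which suits callbacks that observe several instruments or need to be unregistered later; queueLen is a hypothetical depth source:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func queueLen() int64 { return 0 } // hypothetical queue depth source

    func main() {
        meter := otel.Meter("example.com/queue")

        depth, err := meter.Int64ObservableUpDownCounter("queue.depth")
        if err != nil {
            panic(err)
        }

        reg, err := meter.RegisterCallback(
            func(_ context.Context, o metric.Observer) error {
                o.ObserveInt64(depth, queueLen())
                return nil
            },
            depth, // the instruments this callback observes
        )
        if err != nil {
            panic(err)
        }
        defer func() { _ = reg.Unregister() }()
    }
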
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
new file mode 100644
index 000000000..d9e3b13e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// MeterConfig contains options for Meters.
+type MeterConfig struct {
+ instrumentationVersion string
+ schemaURL string
+ attrs attribute.Set
+
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+// InstrumentationVersion returns the version of the library providing
+// instrumentation.
+func (cfg MeterConfig) InstrumentationVersion() string {
+ return cfg.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
+ return cfg.attrs
+}
+
+// SchemaURL returns the schema_url of the library providing instrumentation.
+func (cfg MeterConfig) SchemaURL() string {
+ return cfg.schemaURL
+}
+
+// MeterOption is an interface for applying Meter options.
+type MeterOption interface {
+ // applyMeter is used to set a MeterOption value of a MeterConfig.
+ applyMeter(MeterConfig) MeterConfig
+}
+
+// NewMeterConfig creates a new MeterConfig and applies
+// all the given options.
+func NewMeterConfig(opts ...MeterOption) MeterConfig {
+ var config MeterConfig
+ for _, o := range opts {
+ config = o.applyMeter(config)
+ }
+ return config
+}
+
+type meterOptionFunc func(MeterConfig) MeterConfig
+
+func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
+ return fn(cfg)
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) MeterOption {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ config.instrumentationVersion = version
+ return config
+ })
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
+// WithSchemaURL sets the schema URL.
+func WithSchemaURL(schemaURL string) MeterOption {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ config.schemaURL = schemaURL
+ return config
+ })
+}
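
A small sketch of applying and reading MeterConfig options; the version, schema URL, and attribute values are illustrative:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        cfg := metric.NewMeterConfig(
            metric.WithInstrumentationVersion("1.4.2"),
            metric.WithSchemaURL("https://opentelemetry.io/schemas/1.24.0"),
            metric.WithInstrumentationAttributes(attribute.String("tenant", "demo")),
        )

        fmt.Println(cfg.InstrumentationVersion())          // 1.4.2
        fmt.Println(cfg.SchemaURL())                       // the schema URL above
        fmt.Println(cfg.InstrumentationAttributes().Len()) // 1
    }
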
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
new file mode 100644
index 000000000..f153745b0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package metric provides the OpenTelemetry API used to measure metrics about
+source code operation.
+
+This API is separate from its implementation so the instrumentation built from
+it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official
+OpenTelemetry implementation of this API.
+
+All measurements made with this package are made via instruments. These
+instruments are created by a [Meter] which itself is created by a
+[MeterProvider]. Applications need to accept a [MeterProvider] implementation
+as a starting point when instrumenting. This can be done directly, or by using
+the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an
+appropriately named [Meter] from the accepted [MeterProvider], instrumentation
+can then be built from the [Meter]'s instruments.
+
+# Instruments
+
+Each instrument is designed to make measurements of a particular type. Broadly,
+all instruments fall into two overlapping logical categories: asynchronous or
+synchronous, and int64 or float64.
+
+All synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are used to measure the operation and performance of source
+code during the source code execution. These instruments only make measurements
+when the source code they instrument is run.
+
+All asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) are used to measure metrics outside of the execution
+of source code. They are said to make "observations" via a callback function
+called once every measurement collection cycle.
+
+Each instrument is also grouped by the value type it measures. Either int64 or
+float64. The value being measured will dictate which instrument in these
+categories to use.
+
+Outside of these two broad categories, instruments are described by the
+function they are designed to serve. All Counters ([Int64Counter],
+[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
+designed to measure values that never decrease in value, but instead only
+incrementally increase in value. UpDownCounters ([Int64UpDownCounter],
+[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
+[Float64ObservableUpDownCounter]) on the other hand, are designed to measure
+values that can increase and decrease. When more information needs to be
+conveyed about all the synchronous measurements made during a collection cycle,
+a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
+when just the most recent measurement needs to be conveyed about an
+asynchronous measurement, a Gauge ([Int64ObservableGauge] and
+[Float64ObservableGauge]) should be used.
+
+See the [OpenTelemetry documentation] for more information about instruments
+and their intended use.
+
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+ - Not be empty.
+ - Have an alphabetic character as their first letter.
+ - Have any character after the first be an alphanumeric character, ‘_’, ‘.’,
+ ‘-’, or ‘/’.
+ - Have a maximum length of 255 characters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will
+validate these names; it is the caller's responsibility to ensure compliance.
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter which ensures the callback is called once
+per collection cycle. A callback can be registered two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+ - The measurement process is known when the instrument is created
+ - Only that instrument will make a measurement within the callback
+ - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter] that
+created the instrument to register a [Callback].
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users who update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+ - Compilation failure
+ - Panic
+ - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+ import "go.opentelemetry.io/otel/metric/embedded"
+
+ type MeterProvider struct {
+ embedded.MeterProvider
+ // ...
+ }
+
+If an author wants the default behavior of their implementations to be a panic,
+they need to embed the API interface directly.
+
+ import "go.opentelemetry.io/otel/metric"
+
+ type MeterProvider struct {
+ metric.MeterProvider
+ // ...
+ }
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+ import "go.opentelemetry.io/otel/metric/noop"
+
+ type MeterProvider struct {
+ noop.MeterProvider
+ // ...
+ }
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
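
To make the instrument taxonomy above concrete, a sketch of a synchronous histogram with advisory bucket boundaries; the scope name, instrument name, and bounds are illustrative:

    package main

    import (
        "context"
        "time"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        meter := otel.Meter("example.com/http")

        latency, err := meter.Float64Histogram(
            "request.duration",
            metric.WithUnit("s"),
            // Advisory only; implementations may ignore these bounds.
            metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.05, 0.1, 0.5, 1),
        )
        if err != nil {
            panic(err)
        }

        start := time.Now()
        // ... handle a request ...
        latency.Record(context.Background(), time.Since(start).Seconds())
    }
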
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
new file mode 100644
index 000000000..1f6e0efa7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
@@ -0,0 +1,3 @@
+# Metric Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 000000000..1a9dc6809
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,243 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry metric API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+package embedded // import "go.opentelemetry.io/otel/metric/embedded"
+
+// MeterProvider is embedded in
+// [go.opentelemetry.io/otel/metric.MeterProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type MeterProvider interface{ meterProvider() }
+
+// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Meter interface{ meter() }
+
+// Float64Observer is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Observer] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Observer] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Observer interface{ float64Observer() }
+
+// Int64Observer is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users
+// to experience a compilation error, signaling they need to update to your
+// latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Observer] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Observer interface{ int64Observer() }
+
+// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Observer]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Observer interface{ observer() }
+
+// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Registration] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Registration]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Registration interface{ registration() }
+
+// Float64Counter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Counter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Counter] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Counter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Counter interface{ float64Counter() }
+
+// Float64Histogram is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Histogram].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Histogram interface{ float64Histogram() }
+
+// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64Gauge interface{ float64Gauge() }
+
+// Float64ObservableCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableCounter interface{ float64ObservableCounter() }
+
+// Float64ObservableGauge is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableGauge interface{ float64ObservableGauge() }
+
+// Float64ObservableUpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
+// if you want users to experience a compilation error, signaling they need to
+// update to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() }
+
+// Float64UpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Float64UpDownCounter interface{ float64UpDownCounter() }
+
+// Int64Counter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Counter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users
+// to experience a compilation error, signaling they need to update to your
+// latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Counter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Counter interface{ int64Counter() }
+
+// Int64Histogram is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Histogram].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Histogram interface{ int64Histogram() }
+
+// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience
+// a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64Gauge interface{ int64Gauge() }
+
+// Int64ObservableCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64ObservableCounter interface{ int64ObservableCounter() }
+
+// Int64ObservableGauge is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Int64ObservableGauge interface{ int64ObservableGauge() }
+
+// Int64ObservableUpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if
+// you want users to experience a compilation error, signaling they need to
+// update to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() }
+
+// Int64UpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64UpDownCounter interface{ int64UpDownCounter() }
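
A sketch of the embedding pattern these interfaces enable: because embedded.Float64Counter is embedded, this hypothetical implementation stops compiling (rather than panicking at runtime) if the metric.Float64Counter interface gains a method:

    package mymetric

    import (
        "context"

        "go.opentelemetry.io/otel/metric"
        "go.opentelemetry.io/otel/metric/embedded"
    )

    // countingCounter is a toy Float64Counter that just sums increments.
    type countingCounter struct {
        embedded.Float64Counter

        total float64
    }

    // Compile-time check that the interface is fully implemented.
    var _ metric.Float64Counter = (*countingCounter)(nil)

    func (c *countingCounter) Add(_ context.Context, incr float64, _ ...metric.AddOption) {
        c.total += incr
    }
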
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
new file mode 100644
index 000000000..9f48d5f11
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -0,0 +1,376 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Observable is used as a grouping mechanism for all instruments that are
+// updated within a Callback.
+type Observable interface {
+ observable()
+}
+
+// InstrumentOption applies options to all instruments.
+type InstrumentOption interface {
+ Int64CounterOption
+ Int64UpDownCounterOption
+ Int64HistogramOption
+ Int64GaugeOption
+ Int64ObservableCounterOption
+ Int64ObservableUpDownCounterOption
+ Int64ObservableGaugeOption
+
+ Float64CounterOption
+ Float64UpDownCounterOption
+ Float64HistogramOption
+ Float64GaugeOption
+ Float64ObservableCounterOption
+ Float64ObservableUpDownCounterOption
+ Float64ObservableGaugeOption
+}
+
+// HistogramOption applies options to histogram instruments.
+type HistogramOption interface {
+ Int64HistogramOption
+ Float64HistogramOption
+}
+
+type descOpt string
+
+func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64ObservableUpDownCounter(
+ c Float64ObservableUpDownCounterConfig,
+) Float64ObservableUpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64ObservableUpDownCounter(
+ c Int64ObservableUpDownCounterConfig,
+) Int64ObservableUpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+// WithDescription sets the instrument description.
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(
+ c Float64ObservableUpDownCounterConfig,
+) Float64ObservableUpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(
+ c Int64ObservableUpDownCounterConfig,
+) Int64ObservableUpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
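+
+// For illustration, a minimal sketch of applying both options when creating
+// an instrument; the meter value is assumed to come from a MeterProvider:
+//
+//	counter, err := meter.Int64Counter(
+//		"cache.hits",
+//		WithDescription("Number of cache hits."),
+//		WithUnit("{hit}"),
+//	)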
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+ c.explicitBucketBoundaries = o
+ return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+ c.explicitBucketBoundaries = o
+ return c
+}
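+
+// A sketch of setting advisory bucket boundaries on a histogram; an API
+// implementation may ignore them. The meter value is assumed to exist:
+//
+//	hist, err := meter.Float64Histogram(
+//		"request.duration",
+//		WithUnit("s"),
+//		WithExplicitBucketBoundaries(0.01, 0.1, 1, 10),
+//	)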
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+ applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+ attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+ config := AddConfig{attrs: *attribute.EmptySet()}
+ for _, o := range opts {
+ config = o.applyAdd(config)
+ }
+ return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+ return c.attrs
+}
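+
+// NewAddConfig is meant for API implementations. A sketch of how an SDK
+// counter might resolve options in Add; sdkCounter and its recordSum helper
+// are hypothetical:
+//
+//	func (c *sdkCounter) Add(ctx context.Context, incr int64, opts ...AddOption) {
+//		cfg := NewAddConfig(opts)
+//		c.recordSum(ctx, incr, cfg.Attributes())
+//	}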
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+ applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+ attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+ config := RecordConfig{attrs: *attribute.EmptySet()}
+ for _, o := range opts {
+ config = o.applyRecord(config)
+ }
+ return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+ return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+ applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+ attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+ config := ObserveConfig{attrs: *attribute.EmptySet()}
+ for _, o := range opts {
+ config = o.applyObserve(config)
+ }
+ return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+ return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+ AddOption
+ RecordOption
+ ObserveOption
+}
+
+type attrOpt struct {
+ set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+ // NewMergeIterator uses the first value for any duplicates.
+ iter := attribute.NewMergeIterator(&b, &a)
+ merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for iter.Next() {
+ merged = append(merged, iter.Attribute())
+ }
+ return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+ switch {
+ case o.set.Len() == 0:
+ case c.attrs.Len() == 0:
+ c.attrs = o.set
+ default:
+ c.attrs = mergeSets(c.attrs, o.set)
+ }
+ return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+ switch {
+ case o.set.Len() == 0:
+ case c.attrs.Len() == 0:
+ c.attrs = o.set
+ default:
+ c.attrs = mergeSets(c.attrs, o.set)
+ }
+ return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+ switch {
+ case o.set.Len() == 0:
+ case c.attrs.Len() == 0:
+ c.attrs = o.set
+ default:
+ c.attrs = mergeSets(c.attrs, o.set)
+ }
+ return c
+}
+
+// WithAttributeSet sets the attribute Set a measurement is made with.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+ return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+// cp := make([]attribute.KeyValue, len(attributes))
+// copy(cp, attributes)
+// WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+ cp := make([]attribute.KeyValue, len(attributes))
+ copy(cp, attributes)
+ return attrOpt{set: attribute.NewSet(cp...)}
+}
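+
+// A sketch of the merge behavior described above; counter and ctx are assumed
+// to exist. The duplicate "tier" key resolves to the last value passed:
+//
+//	set := attribute.NewSet(attribute.String("tier", "a"))
+//	counter.Add(ctx, 1,
+//		WithAttributeSet(set),
+//		WithAttributes(attribute.String("tier", "b")), // "tier" is "b" in the result
+//	)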
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 000000000..fdd2a7011
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,284 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.MeterProvider
+
+ // Meter returns a new Meter with the provided name and configuration.
+ //
+	// A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by an application
+	// or by other applications. To achieve this, using the import path of the
+	// instrumentation package as the name is recommended.
+	//
+	// If the name is empty, an implementation-defined default name will be
+	// used instead.
+ Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Meter
+
+ // Int64Counter returns a new Int64Counter instrument identified by name
+ // and configured with options. The instrument is used to synchronously
+ // record increasing int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
+ // Int64UpDownCounter returns a new Int64UpDownCounter instrument
+ // identified by name and configured with options. The instrument is used
+ // to synchronously record int64 measurements during a computational
+ // operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
+ // Int64Histogram returns a new Int64Histogram instrument identified by
+ // name and configured with options. The instrument is used to
+ // synchronously record the distribution of int64 measurements during a
+ // computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
+ // Int64Gauge returns a new Int64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
+ // Int64ObservableCounter returns a new Int64ObservableCounter identified
+ // by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+ // measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithInt64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
+ // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+ // instrument identified by name and configured with options. The
+ // instrument is used to asynchronously record int64 measurements once per
+	// measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithInt64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64ObservableUpDownCounter(
+ name string,
+ options ...Int64ObservableUpDownCounterOption,
+ ) (Int64ObservableUpDownCounter, error)
+
+ // Int64ObservableGauge returns a new Int64ObservableGauge instrument
+ // identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+ // measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithInt64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+ // Float64Counter returns a new Float64Counter instrument identified by
+ // name and configured with options. The instrument is used to
+ // synchronously record increasing float64 measurements during a
+ // computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
+ // Float64UpDownCounter returns a new Float64UpDownCounter instrument
+ // identified by name and configured with options. The instrument is used
+ // to synchronously record float64 measurements during a computational
+ // operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
+ // Float64Histogram returns a new Float64Histogram instrument identified by
+ // name and configured with options. The instrument is used to
+ // synchronously record the distribution of float64 measurements during a
+ // computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
+ // Float64Gauge returns a new Float64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous float64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
+ // Float64ObservableCounter returns a new Float64ObservableCounter
+ // instrument identified by name and configured with options. The
+ // instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithFloat64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
+ // Float64ObservableUpDownCounter returns a new
+ // Float64ObservableUpDownCounter instrument identified by name and
+ // configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithFloat64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64ObservableUpDownCounter(
+ name string,
+ options ...Float64ObservableUpDownCounterOption,
+ ) (Float64ObservableUpDownCounter, error)
+
+ // Float64ObservableGauge returns a new Float64ObservableGauge instrument
+ // identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+ // measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithFloat64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+ // RegisterCallback registers f to be called during the collection of a
+ // measurement cycle.
+ //
+ // If Unregister of the returned Registration is called, f needs to be
+ // unregistered and not called during collection.
+ //
+ // The instruments f is registered with are the only instruments that f may
+ // observe values for.
+ //
+ // If no instruments are passed, f should not be registered nor called
+ // during collection.
+ //
+ // The function f needs to be concurrent safe.
+ RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. Meaning, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
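+
+// A registration sketch; the meter value is assumed to exist, and queueLen is
+// a hypothetical helper returning the current queue length as an int64:
+//
+//	gauge, _ := meter.Int64ObservableGauge("queue.length")
+//	reg, err := meter.RegisterCallback(
+//		func(ctx context.Context, o Observer) error {
+//			o.ObserveInt64(gauge, queueLen())
+//			return nil
+//		},
+//		gauge,
+//	)
+//	// Later, stop the observations:
+//	_ = reg.Unregister()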
+
+// Observer records measurements for multiple instruments in a Callback.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Observer
+
+ // ObserveFloat64 records the float64 value for obsrv.
+ ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
+ // ObserveInt64 records the int64 value for obsrv.
+ ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Registration
+
+ // Unregister removes the callback registration from a Meter.
+ //
+ // This method needs to be idempotent and concurrent safe.
+ Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 000000000..bb8969435
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 000000000..9afb69e58
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,296 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ metric.MeterProvider = MeterProvider{}
+ _ metric.Meter = Meter{}
+ _ metric.Observer = Observer{}
+ _ metric.Registration = Registration{}
+ _ metric.Int64Counter = Int64Counter{}
+ _ metric.Float64Counter = Float64Counter{}
+ _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+ _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+ _ metric.Int64Histogram = Int64Histogram{}
+ _ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64Gauge = Int64Gauge{}
+ _ metric.Float64Gauge = Float64Gauge{}
+ _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+ _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+ _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+ _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+ _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+ _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+ _ metric.Int64Observer = Int64Observer{}
+ _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+ return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+ return Meter{}
+}
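+
+// A sketch of disabling metric telemetry globally, assuming the
+// go.opentelemetry.io/otel package holds the global MeterProvider:
+//
+//	otel.SetMeterProvider(noop.NewMeterProvider())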
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(
+ string,
+ ...metric.Int64ObservableCounterOption,
+) (metric.Int64ObservableCounter, error) {
+ return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(
+ string,
+ ...metric.Int64ObservableUpDownCounterOption,
+) (metric.Int64ObservableUpDownCounter, error) {
+ return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(
+ string,
+ ...metric.Float64ObservableCounterOption,
+) (metric.Float64ObservableCounter, error) {
+ return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(
+ string,
+ ...metric.Float64ObservableUpDownCounterOption,
+) (metric.Float64ObservableUpDownCounter, error) {
+ return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(
+ string,
+ ...metric.Float64ObservableGaugeOption,
+) (metric.Float64ObservableGauge, error) {
+ return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+ return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback; it performs no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including holding any record of registrations.
+func (Registration) Unregister() error { return nil }
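+
+// A sketch of the embedding pattern from the package documentation, written
+// from an importing package: only Int64Counter is customized, and every other
+// Meter method defaults to a no-op. newCountingInstrument is hypothetical.
+//
+//	type partialMeter struct {
+//		noop.Meter
+//	}
+//
+//	func (m partialMeter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+//		return newCountingInstrument(name), nil
+//	}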
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
+// measurements. It produces no telemetry.
+type Int64Gauge struct{ embedded.Int64Gauge }
+
+// Record performs no operation.
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
+// measurements. It produces no telemetry.
+type Float64Gauge struct{ embedded.Float64Gauge }
+
+// Record performs no operation.
+func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+ metric.Int64Observable
+ embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+ metric.Float64Observable
+ embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 000000000..8403a4bad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,226 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Counter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr float64, options ...AddOption)
+}
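+
+// A usage sketch; meter and ctx are assumed to exist, and attribute is
+// go.opentelemetry.io/otel/attribute:
+//
+//	counter, _ := meter.Float64Counter("request.bytes", WithUnit("By"))
+//	counter.Add(ctx, 1024, WithAttributes(attribute.String("handler", "/")))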
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
+type Float64CounterConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+ var config Float64CounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64Counter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+ return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+ applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64UpDownCounter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+ var config Float64UpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64UpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+ applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Histogram
+
+ // Record adds an additional value to the distribution.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
+type Float64HistogramConfig struct {
+ description string
+ unit string
+ explicitBucketBoundaries []float64
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+ var config Float64HistogramConfig
+ for _, o := range opts {
+ config = o.applyFloat64Histogram(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+ return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+ return c.explicitBucketBoundaries
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+ applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
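+
+// A usage sketch recording request latency in seconds; meter, ctx, and start
+// (a time.Time) are assumed to exist:
+//
+//	hist, _ := meter.Float64Histogram("request.duration", WithUnit("s"))
+//	hist.Record(ctx, time.Since(start).Seconds())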
+
+// Float64Gauge is an instrument that records instantaneous float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value float64, options ...RecordOption)
+}
+
+// Float64GaugeConfig contains options for synchronous gauge instruments that
+// record float64 values.
+type Float64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts
+// applied.
+func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig {
+ var config Float64GaugeConfig
+ for _, o := range opts {
+ config = o.applyFloat64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Float64GaugeOption applies options to a [Float64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64GaugeOption.
+type Float64GaugeOption interface {
+ applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 000000000..783fdfba7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,226 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Counter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+ var config Int64CounterConfig
+ for _, o := range opts {
+ config = o.applyInt64Counter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+ return c.unit
+}
+
+// Int64CounterOption applies options to a [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+ applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64UpDownCounter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+ var config Int64UpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyInt64UpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+ applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
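+
+// A usage sketch tracking a value that goes up and down; meter and ctx are
+// assumed to exist:
+//
+//	active, _ := meter.Int64UpDownCounter("requests.active")
+//	active.Add(ctx, 1)        // request started
+//	defer active.Add(ctx, -1) // request finished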
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Histogram
+
+ // Record adds an additional value to the distribution.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+ description string
+ unit string
+ explicitBucketBoundaries []float64
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+ var config Int64HistogramConfig
+ for _, o := range opts {
+ config = o.applyInt64Histogram(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+ return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+ return c.explicitBucketBoundaries
+}
+
+// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+ applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
+
+// Int64Gauge is an instrument that records instantaneous int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value int64, options ...RecordOption)
+}
+
+// Int64GaugeConfig contains options for synchronous gauge instruments that
+// record int64 values.
+type Int64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts
+// applied.
+func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig {
+ var config Int64GaugeConfig
+ for _, o := range opts {
+ config = o.applyInt64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Int64GaugeOption applies options to a [Int64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Int64GaugeOption.
+type Int64GaugeOption interface {
+ applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 000000000..2fd949733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+ return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+ global.SetTextMapPropagator(propagator)
+}
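+
+// A setup sketch, as called from application code, installing the W3C Trace
+// Context propagator from the propagation package as the global propagator:
+//
+//	otel.SetTextMapPropagator(propagation.TraceContext{})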
diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md
new file mode 100644
index 000000000..e2959ac74
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/README.md
@@ -0,0 +1,3 @@
+# Propagation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 000000000..ebda5026d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+ bStr := baggage.FromContext(ctx).String()
+ if bStr != "" {
+ carrier.Set(baggageHeader, bStr)
+ }
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked
+// for multiple values extraction. Otherwise, Get is called.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+ if multiCarrier, ok := carrier.(ValuesGetter); ok {
+ return extractMultiBaggage(parent, multiCarrier)
+ }
+ return extractSingleBaggage(parent, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+ return []string{baggageHeader}
+}
+
+func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context {
+ bStr := carrier.Get(baggageHeader)
+ if bStr == "" {
+ return parent
+ }
+
+ bag, err := baggage.Parse(bStr)
+ if err != nil {
+ return parent
+ }
+ return baggage.ContextWithBaggage(parent, bag)
+}
+
+func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context {
+ bVals := carrier.Values(baggageHeader)
+ if len(bVals) == 0 {
+ return parent
+ }
+ var members []baggage.Member
+ for _, bStr := range bVals {
+ currBag, err := baggage.Parse(bStr)
+ if err != nil {
+ continue
+ }
+ members = append(members, currBag.Members()...)
+ }
+
+ b, err := baggage.New(members...)
+ if err != nil || b.Len() == 0 {
+ return parent
+ }
+ return baggage.ContextWithBaggage(parent, b)
+}
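+
+// A round-trip sketch using an http.Header-backed carrier; ctx and req are
+// assumed to exist:
+//
+//	prop := Baggage{}
+//	carrier := HeaderCarrier(req.Header)
+//	prop.Inject(ctx, carrier)        // writes the "baggage" header
+//	ctx = prop.Extract(ctx, carrier) // parses it back into the context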
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 000000000..33a3baf15
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagator supported by this
+package is the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/), and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 000000000..5c8c26ea2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,168 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+ "net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+// See ValuesGetter for how a TextMapCarrier can get multiple values for a key.
+type TextMapCarrier interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Get returns the value associated with the passed key.
+ Get(key string) string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Set stores the key-value pair.
+ Set(key string, value string)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Keys lists the keys stored in this carrier.
+ Keys() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// ValuesGetter can return multiple values for a single key,
+// in contrast to TextMapCarrier.Get, which returns a single value.
+type ValuesGetter interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Values returns all values associated with the passed key.
+ Values(key string) []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+ return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+ c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+ keys := make([]string, 0, len(c))
+ for k := range c {
+ keys = append(keys, k)
+ }
+ return keys
+}
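+
+// A sketch using MapCarrier as an in-memory carrier, e.g. in tests; ctx is
+// assumed to carry a valid span context:
+//
+//	carrier := MapCarrier{}
+//	TraceContext{}.Inject(ctx, carrier)
+//	extracted := TraceContext{}.Extract(context.Background(), carrier)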
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces.
+type HeaderCarrier http.Header
+
+// Compile time check that HeaderCarrier implements TextMapCarrier.
+var _ TextMapCarrier = HeaderCarrier{}
+
+// Compile time check that HeaderCarrier implements ValuesGetter.
+var _ ValuesGetter = HeaderCarrier{}
+
+// Get returns the first value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+ return http.Header(hc).Get(key)
+}
+
+// Values returns all values associated with the passed key.
+func (hc HeaderCarrier) Values(key string) []string {
+ return http.Header(hc).Values(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+ http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+ keys := make([]string, 0, len(hc))
+ for k := range hc {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+ Inject(ctx context.Context, carrier TextMapCarrier)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Extract reads cross-cutting concerns from the carrier into a Context.
+ // Implementations may check if the carrier implements ValuesGetter,
+ // to support extraction of multiple values per key.
+ Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Fields returns the keys whose values are set with Inject.
+ Fields() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+ for _, i := range p {
+ i.Inject(ctx, carrier)
+ }
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ for _, i := range p {
+ ctx = i.Extract(ctx, carrier)
+ }
+ return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+ unique := make(map[string]struct{})
+ for _, i := range p {
+ for _, k := range i.Fields() {
+ unique[k] = struct{}{}
+ }
+ }
+
+ fields := make([]string, 0, len(unique))
+ for k := range unique {
+ fields = append(fields, k)
+ }
+ return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+ return compositeTextMapPropagator(p)
+}
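+
+// Illustrative sketch (not part of the vendored source): composing
+// propagators so several concerns travel together. propagation.Baggage
+// exists upstream in this package, but treat its use here as an assumption.
+//
+//	prop := propagation.NewCompositeTextMapPropagator(
+//		propagation.TraceContext{},
+//		propagation.Baggage{},
+//	)
+//	prop.Inject(ctx, propagation.HeaderCarrier(req.Header))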
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 000000000..6870e316d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "strings"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ supportedVersion = 0
+ maxVersion = 254
+ traceparentHeader = "traceparent"
+ tracestateHeader = "tracestate"
+ delimiter = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+ _ TextMapPropagator = TraceContext{}
+ versionPart = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject injects the trace context from ctx into carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+ sc := trace.SpanContextFromContext(ctx)
+ if !sc.IsValid() {
+ return
+ }
+
+ if ts := sc.TraceState().String(); ts != "" {
+ carrier.Set(tracestateHeader, ts)
+ }
+
+ // Clear all flags other than the trace-context supported sampling bit.
+ flags := sc.TraceFlags() & trace.FlagsSampled
+
+ var sb strings.Builder
+ sb.Grow(2 + 32 + 16 + 2 + 3)
+ _, _ = sb.WriteString(versionPart)
+ traceID := sc.TraceID()
+ spanID := sc.SpanID()
+ flagByte := [1]byte{byte(flags)}
+ var buf [32]byte
+ for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+ _ = sb.WriteByte(delimiter[0])
+ n := hex.Encode(buf[:], src)
+ _, _ = sb.Write(buf[:n])
+ }
+ carrier.Set(traceparentHeader, sb.String())
+}
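+
+// For example, a sampled span context with trace ID
+// 4bf92f3577b34da6a3ce929d0e0e4736 and span ID 00f067aa0ba902b7 (the
+// worked example from the W3C specification) produces:
+//
+//	traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01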
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ sc := tc.extract(carrier)
+ if !sc.IsValid() {
+ return ctx
+ }
+ return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+ h := carrier.Get(traceparentHeader)
+ if h == "" {
+ return trace.SpanContext{}
+ }
+
+ var ver [1]byte
+ if !extractPart(ver[:], &h, 2) {
+ return trace.SpanContext{}
+ }
+ version := int(ver[0])
+ if version > maxVersion {
+ return trace.SpanContext{}
+ }
+
+ var scc trace.SpanContextConfig
+ if !extractPart(scc.TraceID[:], &h, 32) {
+ return trace.SpanContext{}
+ }
+ if !extractPart(scc.SpanID[:], &h, 16) {
+ return trace.SpanContext{}
+ }
+
+ var opts [1]byte
+ if !extractPart(opts[:], &h, 2) {
+ return trace.SpanContext{}
+ }
+ if version == 0 && (h != "" || opts[0] > 2) {
+ // Version 0 does not allow trailing data after the flags field,
+ // and permits only flag values 0-2.
+ return trace.SpanContext{}
+ }
+
+ // Clear all flags other than the trace-context supported sampling bit.
+ scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+ // Ignore the error returned here. Failure to parse tracestate MUST NOT
+ // affect the parsing of traceparent according to the W3C tracecontext
+ // specification.
+ scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+ scc.Remote = true
+
+ sc := trace.NewSpanContext(scc)
+ if !sc.IsValid() {
+ return trace.SpanContext{}
+ }
+
+ return sc
+}
+
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+ for _, c := range v {
+ if c >= 'A' && c <= 'F' {
+ return true
+ }
+ }
+ return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+ part, left, _ := strings.Cut(*h, delimiter)
+ *h = left
+ // hex.Decode accepts upper-case characters, which traceparent does not allow, so exclude them explicitly.
+ if len(part) != n || upperHex(part) {
+ return false
+ }
+ if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+ return false
+ }
+ return true
+}
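+
+// Illustrative walk-through (not part of the vendored source): extractPart
+// consumes one delimiter-separated field per call, so after stripping the
+// version a traceparent is decoded piecewise.
+//
+//	h := "4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
+//	var traceID [16]byte
+//	ok := extractPart(traceID[:], &h, 32) // ok == true; h == "00f067aa0ba902b7-01"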
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+ return []string{traceparentHeader, tracestateHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
new file mode 100644
index 000000000..fa5acf2d3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -0,0 +1,35 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:best-practices",
+ "helpers:pinGitHubActionDigestsToSemver"
+ ],
+ "ignorePaths": [],
+ "labels": ["Skip Changelog", "dependencies"],
+ "postUpdateOptions" : [
+ "gomodTidy"
+ ],
+ "packageRules": [
+ {
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["indirect"],
+ "enabled": true
+ },
+ {
+ "matchPackageNames": ["go.opentelemetry.io/build-tools/**"],
+ "groupName": "build-tools"
+ },
+ {
+ "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
+ "groupName": "googleapis"
+ },
+ {
+ "matchPackageNames": ["golang.org/x/**"],
+ "groupName": "golang.org/x"
+ },
+ {
+ "matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"],
+ "enabled": false
+ }
+ ]
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 000000000..1bb55fb1c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.4.1
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
new file mode 100644
index 000000000..82e1f46b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.20.0
+
+[PkgGoDev](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 000000000..6685c392b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1198 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
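+
+// Illustrative sketch (not part of the vendored source): these helpers are
+// typically attached to a span; the span value is an assumption.
+//
+//	span.SetAttributes(
+//		semconv.HTTPMethod("GET"),
+//		semconv.HTTPStatusCode(200),
+//	)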
+
+// HTTP Server spans attributes
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+ // if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have the same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the application
+ // layer protocol used. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the version
+ // of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.protocol.version` refers to the version of the protocol used
+ // and might be different from the protocol client's version. If the HTTP
+ // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If defined for the address
+ // family and if different than `net.host.port` and if `net.sock.host.addr`
+ // is set. In other cases, it is still recommended to set this.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the version of
+// the application layer protocol used. See note below.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker does not have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker does not have such notion, the source name SHOULD uniquely
+ // identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka, which are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
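+
+// Illustrative sketch (not part of the vendored source): describing a Kafka
+// consumer message on a span; the span value is an assumption.
+//
+//	span.SetAttributes(
+//		semconv.MessagingKafkaConsumerGroup("my-group"),
+//		semconv.MessagingKafkaSourcePartition(2),
+//		semconv.MessagingKafkaMessageOffset(42),
+//	)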
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message: another way to mark a message besides its ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
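+
+// The following is an illustrative sketch, not part of the generated
+// conventions: one way these RocketMQ helpers might be attached to a
+// producer span using the go.opentelemetry.io/otel API. The tracer name,
+// namespace, and group values are hypothetical.
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel"
+//	)
+//
+//	func publishOrder(ctx context.Context) {
+//		ctx, span := otel.Tracer("example/rocketmq").Start(ctx, "orders publish")
+//		defer span.End()
+//		span.SetAttributes(
+//			MessagingRocketmqNamespace("prod"),              // hypothetical namespace
+//			MessagingRocketmqClientGroup("order-producers"), // hypothetical group
+//			MessagingRocketmqMessageTypeFifo,                // enum value from this package
+//			MessagingRocketmqMessageGroup("order-123"),      // FIFO message group
+//		)
+//	}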
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
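+
+// An illustrative sketch, not part of the generated conventions: recording
+// the User-Agent header on the active server span, assuming the
+// go.opentelemetry.io/otel/trace API and the standard net/http package.
+//
+//	import (
+//		"net/http"
+//
+//		"go.opentelemetry.io/otel/trace"
+//	)
+//
+//	func handle(w http.ResponseWriter, r *http.Request) {
+//		span := trace.SpanFromContext(r.Context())
+//		if ua := r.UserAgent(); ua != "" {
+//			span.SetAttributes(UserAgentOriginal(ua))
+//		}
+//	}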
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
new file mode 100644
index 000000000..0d1f55a8f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry concepts. This package represents the
+// conventions as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 000000000..637763932
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the variant
+ // of the flag evaluation: it SHOULD be a semantic identifier for a value.
+ // If one is unavailable, a stringified version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means for referring to a value without including the value itself.
+ // This can provide additional context for understanding the meaning behind
+ // a value. For example, the variant `red` may be used for the value
+ // `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the variant of
+// the flag evaluation: it SHOULD be a semantic identifier for a value. If
+// one is unavailable, a stringified version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
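+
+// An illustrative sketch, not part of the generated conventions: emitting a
+// feature flag evaluation as a span event, assuming the
+// go.opentelemetry.io/otel/trace API. The "feature_flag" event name follows
+// the semantic conventions; the flag values are hypothetical.
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel/trace"
+//	)
+//
+//	func recordEvaluation(ctx context.Context) {
+//		span := trace.SpanFromContext(ctx)
+//		span.AddEvent("feature_flag", trace.WithAttributes(
+//			FeatureFlagKey("logo-color"), // example value from the docs above
+//			FeatureFlagProviderName("Flag Manager"),
+//			FeatureFlagVariant("red"),
+//		))
+//	}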
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be
+// calculated as two different counters starting from `1`: one for sent
+// messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
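+
+// An illustrative sketch, not part of the generated conventions: annotating
+// an RPC span with per-message events, assuming the
+// go.opentelemetry.io/otel/trace API. The "message" event name and the
+// per-direction counter starting from 1 follow the conventions documented
+// above.
+//
+//	import "go.opentelemetry.io/otel/trace"
+//
+//	func recordSent(span trace.Span, seq int, payload []byte) {
+//		span.AddEvent("message", trace.WithAttributes(
+//			MessageTypeSent,
+//			MessageID(seq), // counter for sent messages, starting at 1
+//			MessageUncompressedSize(len(payload)),
+//		))
+//	}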
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be set to true if
+ // the exception event is recorded at a point where it is known that the
+ // exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span if that span is ended while the exception is still logically "in
+ // flight". This may be actually "in flight" in some languages (e.g. if the
+ // exception is passed to a Context manager's `__exit__` method in Python)
+ // but will usually be caught at the point of recording the exception in
+ // most languages.
+ //
+ // It is usually not possible to determine, at the point where an exception
+ // is thrown, whether it will escape the scope of a span. However, it is
+ // trivial to know that an exception will escape if one checks for an
+ // active exception just before ending the span.
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the
+// exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
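+
+// An illustrative sketch, not part of the generated conventions: marking an
+// error that is still "in flight" when the span ends, assuming the
+// go.opentelemetry.io/otel/trace API. RecordError emits an event named
+// "exception" (see ExceptionEventName in this package). The work() helper is
+// hypothetical.
+//
+//	import "go.opentelemetry.io/otel/trace"
+//
+//	func do(span trace.Span) (err error) {
+//		defer func() {
+//			if err != nil {
+//				// The error escapes this span's scope, so flag it.
+//				span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))
+//			}
+//			span.End()
+//		}()
+//		return work() // hypothetical helper that may fail
+//	}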
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 000000000..f40c97825
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
new file mode 100644
index 000000000..9c1840631
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
new file mode 100644
index 000000000..3d44dae27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
@@ -0,0 +1,2060 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // entries, each a brand name and version separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand
+// name and version entries, each a brand name and version separated by a
+// space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
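+
+// An illustrative sketch, not part of the generated conventions: describing
+// a browser-based application as an OpenTelemetry resource, assuming the
+// go.opentelemetry.io/otel/sdk/resource package and this package's
+// SchemaURL. The brand and platform values are hypothetical.
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	func browserResource() *resource.Resource {
+//		return resource.NewWithAttributes(SchemaURL,
+//			BrowserBrands("Chromium 99", "Chrome 99"),
+//			BrowserPlatform("macOS"),
+//			BrowserMobile(false),
+//			BrowserLanguage("en-US"),
+//		)
+//	}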
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may be
+ // invokable with multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function, *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function app
+ // can host multiple functions that would usually share a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the zone
+ // where the resource is running. Cloud regions often have multiple,
+ // isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the zone
+// where the resource is running. Cloud regions often have multiple, isolated
+// locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
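+
+// An illustrative sketch, not part of the generated conventions: combining
+// the cloud enum values and helpers above into a resource, assuming the
+// go.opentelemetry.io/otel/sdk/resource package. The account and region
+// values are hypothetical.
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	func cloudResource() (*resource.Resource, error) {
+//		res := resource.NewWithAttributes(SchemaURL,
+//			CloudProviderAWS,
+//			CloudPlatformAWSLambda,
+//			CloudAccountID("111111111111"),
+//			CloudRegion("us-east-1"),
+//			CloudAvailabilityZone("us-east-1c"),
+//		)
+//		// Merge with the SDK defaults (service name, SDK info, etc.).
+//		return resource.Merge(resource.Default(), res)
+//	}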
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// Heroku dyno metadata
+const (
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
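+
+// An illustrative sketch, not part of the generated conventions: tagging all
+// telemetry from a process with its deployment tier, assuming the
+// go.opentelemetry.io/otel/sdk/resource package. "staging" is one of the
+// documented example values.
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	func deploymentResource() *resource.Resource {
+//		return resource.NewWithAttributes(SchemaURL,
+//			DeploymentEnvironment("staging"),
+//		)
+//	}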
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `/`, i.e., the function app name followed by
+ // a forward slash followed by the function name (this form can also be
+ // seen in the resource JSON for the function). This means that a span
+ // attribute MUST be used, as an Azure function app can host multiple
+ // functions that would usually share a TracerProvider (see also the
+ // `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string that will potentially be reused for other invocations of the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string that will potentially be reused for other
+// invocations of the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
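+
+// An illustrative sketch, not part of the generated conventions: deriving
+// these FaaS attributes from the standard AWS Lambda environment variables,
+// following the faas.max_memory note above (megabytes multiplied by
+// 1,048,576).
+//
+//	import (
+//		"os"
+//		"strconv"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func lambdaAttributes() []attribute.KeyValue {
+//		attrs := []attribute.KeyValue{FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME"))}
+//		if mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+//			attrs = append(attrs, FaaSMaxMemory(mb*1048576)) // MB converted to bytes
+//		}
+//		return attrs
+//	}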
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
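
A hedged sketch of how the k8s.* constructors above might be wired up. The environment variable names are assumptions: a Pod spec could inject them via the Kubernetes Downward API, but no standard names exist.

```go
package k8sexample

import (
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// k8sResource builds a resource describing the workload this process runs in.
// POD_NAMESPACE, POD_NAME, and NODE_NAME are hypothetical variables assumed
// to be injected by the Pod spec.
func k8sResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
		semconv.K8SPodName(os.Getenv("POD_NAME")),
		semconv.K8SNodeName(os.Getenv("NODE_NAME")),
	)
}
```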
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, like e.g. reported by `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, e.g. as reported by the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
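
The os.type enum values above line up closely (but not exactly) with Go's `runtime.GOOS` strings. A small sketch of the mapping, falling back to the raw GOOS value for platforms without a dedicated constant:

```go
package osexample

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// osTypeAttr maps runtime.GOOS onto the os.type enum members defined above.
func osTypeAttr() attribute.KeyValue {
	switch runtime.GOOS {
	case "windows":
		return semconv.OSTypeWindows
	case "linux":
		return semconv.OSTypeLinux
	case "darwin":
		return semconv.OSTypeDarwin
	case "freebsd":
		return semconv.OSTypeFreeBSD
	case "netbsd":
		return semconv.OSTypeNetBSD
	case "openbsd":
		return semconv.OSTypeOpenBSD
	case "solaris":
		return semconv.OSTypeSolaris
	case "aix":
		return semconv.OSTypeAIX
	default:
		// No dedicated constant (e.g. GOOS "dragonfly" vs. enum
		// "dragonflybsd", "zos" vs. "z_os"); fall back to the raw string.
		return semconv.OSTypeKey.String(runtime.GOOS)
	}
}
```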
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
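
Most of the process.* and process.runtime.* values above can be collected from the Go standard library. A minimal sketch (error handling trimmed for brevity):

```go
package procexample

import (
	"os"
	"os/user"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// processAttrs gathers process identity attributes for the current process.
func processAttrs() []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessCommandArgs(os.Args...),
		semconv.ProcessRuntimeName("go"),
		semconv.ProcessRuntimeVersion(runtime.Version()),
	}
	if exe, err := os.Executable(); err == nil {
		attrs = append(attrs, semconv.ProcessExecutablePath(exe))
	}
	if u, err := user.Current(); err == nil {
		attrs = append(attrs, semconv.ProcessOwner(u.Username))
	}
	return attrs
}
```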
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
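
A sketch of the service identity triplet described in the notes above, reusing the spec's own example values; per the service.instance.id note, a random RFC 4122 UUID is the recommended source for the instance ID.

```go
package svcexample

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// serviceResource identifies one instance of the "shoppingcart" service.
func serviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
		semconv.ServiceVersion("2.0.0"),
	)
}
```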
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
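
Applications rarely set the telemetry.sdk.* keys by hand: the Go SDK's default resource already carries them. A quick way to inspect what the SDK reports (assuming the `go.opentelemetry.io/otel/sdk/resource` package):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// resource.Default() includes telemetry.sdk.name, telemetry.sdk.language,
	// and telemetry.sdk.version alongside a fallback service.name.
	for _, kv := range resource.Default().Attributes() {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}
```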
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 000000000..95d0210e3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 000000000..90b1b0452
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2599 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
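
A sketch of recording these exception.* attributes as a span event. In everyday code `span.RecordError(err)` produces such an event for you; the manual form below only illustrates the constructors, and the type string is a hypothetical example.

```go
package excexample

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// recordException attaches exception attributes to a span as an event.
func recordException(ctx context.Context, err error) {
	_, span := otel.Tracer("example").Start(ctx, "operation")
	defer span.End()

	span.AddEvent("exception", trace.WithAttributes(
		semconv.ExceptionType("*errors.errorString"), // hypothetical type name
		semconv.ExceptionMessage(err.Error()),
	))
}
```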
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // that contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
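
A sketch populating the cloudevents.* attributes from the spec's example values; a real instrumentation would read these fields off the CloudEvents envelope.

```go
package ceexample

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// cloudEventAttrs describes one CloudEvent being processed.
func cloudEventAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.github.pull_request.opened"),
		semconv.CloudeventsEventSubject("mynewfile.jpg"),
	}
}
```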
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
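+// Illustrative sketch (not part of the upstream semantic conventions): it
+// shows how the generic connection- and call-level helpers above might be
+// combined into one attribute set for a database client span. The function
+// name and all values are hypothetical.
+func exampleGenericDBAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemMariaDB,                     // one of the DBSystem* enum members
+		DBUser("readonly"),                  // db.user (invented value)
+		DBName("customers"),                 // db.name (invented value)
+		DBOperation("SELECT"),               // db.operation: the SQL keyword
+		DBStatement("SELECT * FROM orders"), // db.statement (invented value)
+	}
+}
+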
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+// Note: This mirrors the db.sql.table attribute but references Cassandra
+// rather than SQL. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
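+// Illustrative sketch (not part of the upstream semantic conventions): a
+// hypothetical single Cassandra query span combining the call-level helpers
+// above. The function name is invented; the keyspace-qualified table name is
+// an assumption based on the db.cassandra.table description.
+func exampleCassandraAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBCassandraConsistencyLevelLocalQuorum, // db.cassandra.consistency_level
+		DBCassandraPageSize(5000),              // db.cassandra.page_size
+		DBCassandraTable("mykeyspace.mytable"), // includes the keyspace name
+		DBCassandraIdempotence(true),
+		DBCassandraCoordinatorDC("us-west-2"),
+	}
+}
+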
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
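+// Illustrative sketch (not part of the upstream semantic conventions): the
+// system-specific call-level helpers above replace or refine the generic
+// ones. The three systems are shown together only for brevity; in practice a
+// span uses only the helpers for its own backend. Function name is invented.
+func exampleSystemSpecificAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Redis: db.redis.database_index is used instead of db.name.
+		DBRedisDBIndex(15),
+		// MongoDB: the collection accessed within db.name.
+		DBMongoDBCollection("products"),
+		// SQL: the primary table, including the database name if applicable.
+		DBSQLTable("public.users"),
+	}
+}
+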
+// Call-level attributes for Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+// Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+// Cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+ // default))
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+// the request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+// Cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
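+// Illustrative sketch (not part of the upstream semantic conventions): a
+// hypothetical Cosmos DB request annotated with the helpers and enum members
+// above. The function name and the container name are invented; the numeric
+// values come from the Examples in the attribute docs.
+func exampleCosmosDBAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBCosmosDBOperationTypeQuery,    // db.cosmosdb.operation_type
+		DBCosmosDBConnectionModeGateway, // db.cosmosdb.connection_mode
+		DBCosmosDBContainer("orders"),   // db.cosmosdb.container (invented)
+		DBCosmosDBStatusCode(200),       // db.cosmosdb.status_code
+		DBCosmosDBRequestCharge(46.18),  // db.cosmosdb.request_charge (RU)
+	}
+}
+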
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
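+// Illustrative sketch (not part of the upstream semantic conventions): how a
+// non-OTLP exporter might encode a span's error status using the attributes
+// above. Function name is invented; the description comes from the Examples.
+func exampleOTelStatusAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		OTelStatusCodeError,                         // otel.status_code
+		OTelStatusDescription("resource not found"), // otel.status_description
+	}
+}
+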
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+// trigger that the corresponding incoming request would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
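+// Illustrative sketch (not part of the upstream semantic conventions): an
+// incoming FaaS invocation span MUST carry faas.trigger; the invocation ID
+// is optional. Function name is invented; the ID comes from the Examples.
+func exampleFaaSInvocationAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FaaSTriggerHTTP, // faas.trigger
+		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+	}
+}
+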
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
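+// Illustrative sketch (not part of the upstream semantic conventions): a
+// datasource-triggered invocation, e.g. an object created in a storage
+// bucket. Function name is invented; values come from the Examples above.
+func exampleFaaSDocumentAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FaaSTriggerDatasource,
+		FaaSDocumentOperationInsert,              // faas.document.operation
+		FaaSDocumentCollection("myBucketName"),   // bucket/database name
+		FaaSDocumentName("myFile.txt"),           // file/table name
+		FaaSDocumentTime("2020-01-23T13:47:06Z"), // ISO 8601, UTC
+	}
+}
+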
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
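+// Illustrative sketch (not part of the upstream semantic conventions): a
+// timer-triggered invocation. Function name is invented; values come from
+// the Examples above.
+func exampleFaaSTimerAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FaaSTriggerTimer,
+		FaaSTime("2020-01-23T13:47:06Z"), // faas.time (ISO 8601, UTC)
+		FaaSCron("0/5 * * * ? *"),        // faas.cron
+	}
+}
+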
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
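+// Illustrative sketch (not part of the upstream semantic conventions): a
+// client span for invoking another function, with the required name and
+// provider plus the conditionally required region. Function name invented.
+func exampleFaaSOutgoingAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FaaSInvokedName("my-function"),    // faas.invoked_name
+		FaaSInvokedProviderAWS,            // faas.invoked_provider
+		FaaSInvokedRegion("eu-central-1"), // faas.invoked_region
+	}
+}
+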
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
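+// Illustrative sketch (not part of the upstream semantic conventions):
+// end-user attributes extracted from an access token. Function name is
+// invented; values come from the Examples above. Note these may carry
+// personally identifiable information.
+func exampleEnduserAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		EnduserID("username"),
+		EnduserRole("admin"),
+		EnduserScope("read:message, write:files"),
+	}
+}
+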
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
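+// Illustrative sketch (not part of the upstream semantic conventions):
+// thread attributes for the span's originating thread. Function name is
+// invented; values come from the Examples above.
+func exampleThreadAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ThreadID(42), // managed thread ID, not the OS thread ID
+		ThreadName("main"),
+	}
+}
+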
+// These attributes allow reporting this unit of code and thereby provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
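+// Illustrative sketch (not part of the upstream semantic conventions):
+// source-location attributes for the unit of code that produced a span.
+// Function name is invented; values come from the Examples above.
+func exampleCodeAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CodeFunction("serveRequest"),
+		CodeNamespace("com.example.MyHTTPService"),
+		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+		CodeLineNumber(42),
+	}
+}
+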
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+// resent by the client, regardless of the cause of the resending
+// (e.g. redirection, authorization failure, 503 Service Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
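+// Illustrative sketch (not part of the upstream semantic conventions): HTTP
+// client span attributes. Note the credential-stripping requirement on
+// http.url. Function name is invented; values come from the Examples above.
+func exampleHTTPClientAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+		HTTPResendCount(3), // set if and only if the request was retried
+	}
+}
+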
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+// an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+// Note: This is not necessarily the same as `net.sock.peer.addr`, which
+// would identify the network-level peer, which may be a proxy.
+//
+// This attribute should be set when a source of information different from
+// the one used for `net.sock.peer.addr` is available, even if that other
+// source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in
+// an HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
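+// Illustrative sketch (not part of the upstream semantic conventions): HTTP
+// server span attributes. Function name is invented; values come from the
+// Examples above.
+func exampleHTTPServerAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HTTPTarget("/users/12314/?q=ddds"),
+		HTTPClientIP("83.164.160.102"), // e.g. taken from X-Forwarded-For
+	}
+}
+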
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
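+// Illustrative sketch (not part of the upstream semantic conventions): a
+// hypothetical DynamoDB Query span combining several of the shared request
+// attributes above. Function name is invented; values come from the Examples.
+func exampleDynamoDBQueryAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		AWSDynamoDBTableNames("Users"),        // aws.dynamodb.table_names
+		AWSDynamoDBIndexName("name_to_group"), // aws.dynamodb.index_name
+		AWSDynamoDBConsistentRead(true),
+		AWSDynamoDBProjection("Title, Price, Color"),
+		AWSDynamoDBLimit(10),
+		AWSDynamoDBSelect("ALL_ATTRIBUTES"),
+	}
+}
+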
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
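+
+// Usage sketch: the Query/Scan constructors above produce attribute.KeyValue
+// pairs that would typically be attached to the client span for the DynamoDB
+// call. Assumes a trace.Span named span is in scope and this package is
+// imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.AWSDynamoDBScanForward(true),
+//        semconv.AWSDynamoDBSegment(2),
+//        semconv.AWSDynamoDBTotalSegments(8),
+//        semconv.AWSDynamoDBCount(10),
+//        semconv.AWSDynamoDBScannedCount(50),
+//    )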
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
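+
+// Usage sketch: the variadic constructors above accept one JSON-serialized
+// string per item, matching the string[] attribute type. Assumes a
+// trace.Span named span is in scope and this package is imported as semconv.
+//
+//    span.SetAttributes(semconv.AWSDynamoDBAttributeDefinitions(
+//        `{ "AttributeName": "id", "AttributeType": "S" }`,
+//        `{ "AttributeName": "name", "AttributeType": "S" }`,
+//    ))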
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
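+
+// Usage sketch: for a multipart upload-part call, the relevant S3 attributes
+// above could be recorded together on the client span. Assumes a trace.Span
+// named span is in scope and this package is imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.AWSS3Bucket("some-bucket-name"),
+//        semconv.AWSS3Key("someFile.yml"),
+//        semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//        semconv.AWSS3PartNumber(3456),
+//    )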
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
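+
+// Usage sketch: the operation type is an Enum, so it is set via one of the
+// predefined vars rather than a constructor. Assumes a trace.Span named span
+// is in scope and this package is imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.GraphqlOperationTypeQuery,
+//        semconv.GraphqlOperationName("findBookByID"),
+//        semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//    )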
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
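+
+// Usage sketch: a batch-receive span could combine the messaging attributes
+// above; the Enum-typed operation uses a predefined var. Assumes a
+// trace.Span named span is in scope and this package is imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.MessagingSystem("kafka"),
+//        semconv.MessagingOperationReceive,
+//        semconv.MessagingBatchMessageCount(2),
+//        semconv.MessagingConsumerID("mygroup - client-6"),
+//    )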
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called; it must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
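+
+// Usage sketch: a gRPC client span could combine the general RPC attributes
+// with the status-code var matching the call's result. Assumes a trace.Span
+// named span is in scope and this package is imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.RPCSystemGRPC,
+//        semconv.RPCService("myservice.EchoService"),
+//        semconv.RPCMethod("exampleMethod"),
+//        semconv.RPCGRPCStatusCodeOk,
+//    )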
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since the protocol allows id to be int,
+ // string, `null` or missing (for notifications), the value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows id to be int,
+// string, `null` or missing (for notifications), the value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
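+
+// Usage sketch: for a failed JSON-RPC 2.0 call, the error attributes are
+// added alongside the version and request id. Assumes a trace.Span named
+// span is in scope and this package is imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.RPCJsonrpcVersion("2.0"),
+//        semconv.RPCJsonrpcRequestID("10"),
+//        semconv.RPCJsonrpcErrorCode(-32700),
+//        semconv.RPCJsonrpcErrorMessage("Parse error"),
+//    )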
+
+// Tech-specific attributes for Connect RPC.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If response is not successful
+ // and if error code available.)
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
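+
+// Usage sketch: Connect error codes are string-valued Enum members, so a
+// failed call is annotated with one of the predefined vars rather than a
+// constructor. Assumes a trace.Span named span is in scope and this package
+// is imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.RPCSystemConnectRPC,
+//        semconv.RPCConnectRPCErrorCodeUnavailable,
+//    )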
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 000000000..2de1fc3c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 000000000..d8dc822b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result; it shows whether the lease was acquired or
+ // contains a rejection reason.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: stable
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+ // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+ // It represents the ASP.NET Core exception middleware handling result
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'handled', 'unhandled'
+ AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating if the request was handled by the application pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+
+ // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
+ // "aspnetcore.routing.match_status" semantic conventions. It represents
+ // the match result - success or failure
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'success', 'failure'
+ AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+var (
+ // Exception was handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
+ // Exception was not handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
+ // Exception handling was skipped because the response had started
+ AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
+ // Exception handling didn't run because the request was aborted
+ AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
+)
+
+var (
+ // Match succeeded
+ AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
+ // Match failed
+ AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if the request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
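+
+// Usage sketch: boolean and string ASP.NET Core attributes have
+// constructors, while the Enum-typed rate-limiting and exception results use
+// the predefined vars. Assumes a trace.Span named span is in scope and this
+// package is imported as semconv.
+//
+//    span.SetAttributes(
+//        semconv.AspnetcoreRateLimitingPolicy("fixed"),
+//        semconv.AspnetcoreRateLimitingResultAcquired,
+//        semconv.AspnetcoreRoutingIsFallback(false),
+//    )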
+
+// Generic attributes for AWS services.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes for AWS DynamoDB.
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
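+// A minimal usage sketch (not part of the generated API): annotating a span
+// for a DynamoDB Query call with the helpers above. The tracer, ctx, the
+// placeholder values, and the semconv import version are assumptions of this
+// example.
+//
+//	import (
+//		"go.opentelemetry.io/otel"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
+//		"go.opentelemetry.io/otel/trace"
+//	)
+//
+//	tracer := otel.Tracer("example")
+//	ctx, span := tracer.Start(ctx, "DynamoDB.Query", trace.WithAttributes(
+//		semconv.AWSDynamoDBTableNames("my-table"),   // placeholder table
+//		semconv.AWSDynamoDBIndexName("my-index"),    // placeholder index
+//		semconv.AWSDynamoDBConsistentRead(true),
+//		semconv.AWSDynamoDBLimit(25),
+//		semconv.AWSDynamoDBScanForward(false),
+//	))
+//	defer span.End()
+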
+// Attributes for AWS Elastic Container Service (ECS).
+const (
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID
+ // MUST be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
+ // populated.)
+ // Stability: experimental
+ // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
+ // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
+ // running [ECS
+ // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
+ // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family
+ // name of the [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+ // used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the
+// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
+// ECS task. The ID MUST be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS
+// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+// used to create the ECS task.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
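+// A minimal usage sketch (not part of the generated API): describing an ECS
+// task on a resource. The ARN and task values are placeholders, and the
+// sdk/resource import is an assumption of this example.
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//		semconv.AWSECSTaskRevision("8"),
+//	)
+//	_ = res
+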
+// Attributes for AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
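+// A minimal usage sketch (not part of the generated API): the EKS cluster ARN
+// is typically attached to the resource alongside the cloud attributes; the
+// ARN below is a placeholder.
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.AWSEKSClusterARN("arn:aws:eks:us-west-2:123456789123:cluster/my-cluster"),
+//	)
+//	_ = res
+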
+// Attributes for AWS Logs.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each write to their own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
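+// A minimal usage sketch (not part of the generated API): recording the log
+// group and stream a process writes to on its resource; the names below are
+// placeholders taken from the examples above.
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.AWSLogGroupNames("/aws/lambda/my-function"),
+//		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	)
+//	_ = res
+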
+// Attributes for AWS Lambda.
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
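+// A minimal usage sketch (not part of the generated API): setting the invoked
+// ARN on a Lambda invocation span. The tracer, ctx, and the ARN value are
+// assumptions of this example.
+//
+//	ctx, span := tracer.Start(ctx, "my-function", trace.WithAttributes(
+//		semconv.AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
+//	))
+//	defer span.End()
+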
+// Attributes for AWS S3.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
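+// A minimal usage sketch (not part of the generated API): annotating an S3
+// upload-part span; the bucket, key, upload ID, and part number are
+// placeholders taken from the examples above.
+//
+//	ctx, span := tracer.Start(ctx, "S3.UploadPart", trace.WithAttributes(
+//		semconv.AWSS3Bucket("some-bucket-name"),
+//		semconv.AWSS3Key("someFile.yml"),
+//		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		semconv.AWSS3PartNumber(3456),
+//	))
+//	defer span.End()
+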
+// The web browser attributes.
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
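+// A minimal usage sketch (not part of the generated API): browser attributes
+// as they might be captured from the UA client hints and Navigator APIs; the
+// values and the pre-existing span are assumptions of this example.
+//
+//	span.SetAttributes(
+//		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
+//		semconv.BrowserLanguage("en-US"),
+//		semconv.BrowserMobile(false),
+//		semconv.BrowserPlatform("macOS"),
+//	)
+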
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
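+// A minimal usage sketch (not part of the generated API): recording the
+// client side of a TCP connection on a server span; the span and values are
+// assumptions of this example.
+//
+//	span.SetAttributes(
+//		semconv.ClientAddress("10.1.2.80"),
+//		semconv.ClientPort(65123),
+//	)
+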
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Apps
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
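+// A minimal usage sketch (not part of the generated API): a resource for a
+// workload running on AWS Lambda; the region, account, and ARN values are
+// placeholders.
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.CloudProviderAWS,
+//		semconv.CloudPlatformAWSLambda,
+//		semconv.CloudRegion("us-east-1"),
+//		semconv.CloudAccountID("111111111111"),
+//		semconv.CloudResourceID("arn:aws:lambda:us-east-1:111111111111:function:my-function"),
+//	)
+//	_ = res
+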
+// Attributes for CloudEvents.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
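+// A minimal usage sketch (not part of the generated API): annotating a span
+// that processes a CloudEvent; the event values are placeholders taken from
+// the examples above, and the pre-existing span is an assumption.
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("my-service"),
+//		semconv.CloudeventsEventSpecVersion("1.0"),
+//		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
+//	)
+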
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeStacktraceKey is the attribute Key conforming to the
+ // "code.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'at
+ // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
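+// A minimal usage sketch (not part of the generated API): pointing a span at
+// the code that produced it via the standard runtime package. The mapping
+// from runtime.Caller output to these attributes, and the pre-existing span,
+// are assumptions of this example.
+//
+//	import "runtime"
+//
+//	pc, file, line, _ := runtime.Caller(0)
+//	span.SetAttributes(
+//		semconv.CodeFilepath(file),
+//		semconv.CodeFunction(runtime.FuncForPC(pc).Name()),
+//		semconv.CodeLineNumber(line),
+//	)
+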
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCPUStateKey is the attribute Key conforming to the
+ // "container.cpu.state" semantic conventions. It represents the CPU state
+ // for this data point.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'user', 'kernel'
+ ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name, for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+ // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+ ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+ // When CPU is used by the system (host OS)
+ ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+ // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+ ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
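
Taken together, the helpers above are typically used to stamp container metadata onto a resource. A minimal sketch, assuming the OTel SDK's `resource` package is available and that the semconv version import path matches the package vendored in this diff:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	// Assumed version path; substitute the semconv version vendored here.
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Describe the container this process runs in, using the helper
	// constructors defined above.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerRuntime("containerd"),
		semconv.ContainerImageName("registry.example.com/my-org/my-image"),
		semconv.ContainerImageTags("v1.27.1"),
	)
	for _, kv := range res.Attributes() {
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}
```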
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+ // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+ // "db.client.connections.pool.name" semantic conventions. It represents
+ // the name of the connection pool; unique within the instrumented
+ // application. In case the connection pool implementation doesn't provide
+ // a name, instrumentation should use a combination of `server.address` and
+ // `server.port` attributes formatted as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+ // DBClientConnectionsStateKey is the attribute Key conforming to the
+ // "db.client.connections.state" semantic conventions. It represents the
+ // state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle'
+ DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: If the collection name is parsed from the query, it SHOULD match
+ // the value provided in the query and may be qualified with the schema and
+ // database name.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+ // semantic conventions. It represents the name of the database, fully
+ // qualified within the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'test.users'
+ // Note: If a database system has multiple namespace components, they
+ // SHOULD be concatenated (potentially using database system specific
+ // conventions) from most general to most specific namespace component, and
+ // more specific namespaces SHOULD NOT be captured without the more general
+ // namespaces, to ensure that "startswith" queries for the more general
+ // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document
+ // what `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationNameKey is the attribute Key conforming to the
+ // "db.operation.name" semantic conventions. It represents the name of the
+ // operation or command being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: It is RECOMMENDED to capture the value as provided by the
+ // application without attempting to do any case normalization.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
+ // "WuValue"'
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents the database management system (DBMS) product
+ // as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual DBMS may differ from the one identified by the client.
+ // For example, when using PostgreSQL client libraries to connect to a
+ // CockroachDB, the `db.system` is set to `postgresql` based on the
+ // instrumentation's best knowledge.
+ DBSystemKey = attribute.Key("db.system")
+)
+
+var (
+ // idle
+ DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
+ // used
+ DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionsPoolNameKey.String(val)
+}
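
The fallback naming rule in the comment above (use `server.address:server.port` when the pool implementation exposes no name) is easy to get wrong; here is a small sketch, where `poolNameAttr` is a hypothetical helper and the semconv version path is assumed:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

// poolNameAttr falls back to "server.address:server.port" when the pool has
// no name of its own, as the doc comment above recommends.
func poolNameAttr(name, serverAddr string, serverPort int) attribute.KeyValue {
	if name == "" {
		name = fmt.Sprintf("%s:%d", serverAddr, serverPort)
	}
	return semconv.DBClientConnectionsPoolName(name)
}

func main() {
	fmt.Println(poolNameAttr("", "db.example.com", 5432).Value.Emit())           // db.example.com:5432
	fmt.Println(poolNameAttr("myDataSource", "db.example.com", 5432).Value.Emit()) // myDataSource
}
```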
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
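
As a sketch of how these helpers compose on a database client span (an installed tracer provider is assumed; `otel.Tracer` falls back to a no-op otherwise, and the semconv version path is an assumption):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func main() {
	tracer := otel.Tracer("db-example")
	_, span := tracer.Start(context.Background(), "SELECT my_db.users")
	defer span.End()

	// Combine the enum member and the helper constructors defined above.
	span.SetAttributes(
		semconv.DBSystemPostgreSQL,
		semconv.DBNamespace("my_db"),
		semconv.DBCollectionName("users"),
		semconv.DBOperationName("SELECT"),
		semconv.DBQueryText("SELECT * FROM users WHERE id = $1"),
	)
}
```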
+
+// This group defines attributes for Cassandra.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // cosmosDB Operation Type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// This group defines attributes for Elasticsearch.
+const (
+ // DBElasticsearchClusterNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.cluster.name" semantic conventions. It represents the
+ // represents the identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // represents the human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// represents the identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// represents the human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
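
The uniqueness note above is the important part: two resources that differ only in `deployment.environment` still identify the same service. A minimal sketch (the semconv version path is an assumption):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func main() {
	// Per the note above, these two resources identify the same service:
	// deployment.environment does not feed the service identity.
	staging := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("frontend"),
		semconv.DeploymentEnvironment("staging"),
	)
	production := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("frontend"),
		semconv.DeploymentEnvironment("production"),
	)
	fmt.Println(staging)
	fmt.Println(production)
}
```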
+
+// Attributes that represents an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It is deprecated; use the `device.app.lifecycle`
+ // event definition, including `android.state` as a payload field, instead.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the
+ // destination address - domain name if available without reverse DNS
+ // lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the source side, and when communicating through
+ // an intermediary, `destination.address` SHOULD represent the destination
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the destination
+ // port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Describes device attributes.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// These attributes may be used for any disk related operation.
+const (
+ // DiskIoDirectionKey is the attribute Key conforming to the
+ // "disk.io.direction" semantic conventions. It represents the disk IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read'
+ DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+ // read
+ DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+ // write
+ DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
+
+// The shared attributes used to report a DNS query.
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the
+ // "dns.question.name" semantic conventions. It represents the name being
+ // queried.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.example.com', 'opentelemetry.io'
+ // Note: If the name field contains non-printable characters (below 32 or
+ // above 126), those characters should be represented as escaped base 10
+ // integers (\DDD). Back slashes and quotes should be escaped. Tabs,
+ // carriage returns, and line feeds should be converted to \t, \r, and \n
+ // respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
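
The escaping rules in the note above (`\t`/`\r`/`\n` for whitespace controls, escaped backslashes and quotes, and `\DDD` for other non-printables) translate to a short helper. This is one possible reading of the note, and `escapeDNSQuestionName` is a hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"
	"strings"
)

// escapeDNSQuestionName applies the note above: tabs, carriage returns, and
// line feeds become \t, \r, \n; backslashes and quotes are escaped; any other
// byte below 32 or above 126 becomes an escaped base-10 integer (\DDD).
func escapeDNSQuestionName(name string) string {
	var b strings.Builder
	for i := 0; i < len(name); i++ {
		switch c := name[i]; {
		case c == '\t':
			b.WriteString(`\t`)
		case c == '\r':
			b.WriteString(`\r`)
		case c == '\n':
			b.WriteString(`\n`)
		case c == '\\' || c == '"':
			b.WriteByte('\\')
			b.WriteByte(c)
		case c < 32 || c > 126:
			fmt.Fprintf(&b, `\%03d`, c)
		default:
			b.WriteByte(c)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapeDNSQuestionName("www.example.com\x07")) // www.example.com\007
}
```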
+
+// Attributes for operations with an authenticated and/or authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+ // semantic conventions. It describes a class of error the operation
+ // ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be
+ // used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
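
The recommendation above (keep the domain-specific attribute and still set `error.type`, with `_OTHER` as the fallback) could look like this for HTTP. `httpErrorType` is a hypothetical helper, the error-classification policy is illustrative only, and the semconv version path is assumed:

```go
package main

import (
	"fmt"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

// httpErrorType keeps the domain-specific status-code attribute and sets
// error.type alongside it, falling back to _OTHER for errors the
// instrumentation does not classify, per the note above.
func httpErrorType(status int, err error) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.HTTPResponseStatusCode(status)}
	switch {
	case status >= 500:
		// HTTP defines its own error identifiers: reuse the status code.
		attrs = append(attrs, semconv.ErrorTypeKey.String(strconv.Itoa(status)))
	case err != nil:
		// No domain-specific identifier applies: use the fallback value.
		attrs = append(attrs, semconv.ErrorTypeOther)
	}
	return attrs
}

func main() {
	fmt.Println(httpErrorType(503, nil))
}
```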
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
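
The `exception.escaped` note above suggests checking for an active error just before ending the span; a minimal sketch of that pattern (tracer wiring and the semconv version path are assumptions):

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) (err error) {
	_, span := otel.Tracer("example").Start(ctx, "doWork")
	defer func() {
		if err != nil {
			// The span is about to end while the error is still "in flight",
			// so exception.escaped is known to be true at this point.
			span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
			span.SetStatus(codes.Error, err.Error())
		}
		span.End()
	}()
	return errors.New("boom")
}

func main() { _ = doWork(context.Background()) }
```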
+
+// FaaS attributes
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name,
+ // and in Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
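+// exampleFaaSAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions: it shows how the FaaS helpers above compose
+// into an attribute set. All values are hypothetical.
+func exampleFaaSAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FaaSColdstart(true),
+  FaaSInvokedName("my-function"), // hypothetical function name
+  FaaSInvokedRegion("eu-central-1"),
+  FaaSMaxMemory(134217728), // 128 MiB expressed in bytes
+ }
+}
+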
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
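+// exampleFeatureFlagAttributes is a minimal illustrative sketch, not part of
+// the upstream semantic conventions: it records a flag evaluation where the
+// variant "red" stands for a concrete value such as "#c05543". All values
+// are hypothetical.
+func exampleFeatureFlagAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FeatureFlagKey("logo-color"),
+  FeatureFlagProviderName("Flag Manager"),
+  FeatureFlagVariant("red"),
+ }
+}
+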
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FileSizeKey = attribute.Key("file.size")
+)
+
+// FileDirectory returns an attribute KeyValue conforming to the
+// "file.directory" semantic conventions. It represents the directory where the
+// file is located. It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the
+// "file.extension" semantic conventions. It represents the file extension,
+// excluding the leading dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name"
+// semantic conventions. It represents the name of the file including the
+// extension, without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path"
+// semantic conventions. It represents the full path to the file, including the
+// file name. It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size"
+// semantic conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
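+// exampleFileAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions: it describes a single file using the
+// helpers above. The path and size are hypothetical.
+func exampleFileAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FileDirectory("/home/alice"),
+  FileName("example.png"),
+  FileExtension("png"), // excluding the leading dot
+  FileSize(65536),      // in bytes
+ }
+}
+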
+// Attributes for Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
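+// exampleCloudRunJobAttributes is a minimal illustrative sketch, not part of
+// the upstream semantic conventions. In a real Cloud Run job these values
+// would come from the CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX
+// environment variables; here they are hard-coded placeholders.
+func exampleCloudRunJobAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GCPCloudRunJobExecution("sample-job-mdw84"),
+  GCPCloudRunJobTaskIndex(0),
+ }
+}
+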
+// Attributes for Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
+
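+// exampleGceInstanceAttributes is a minimal illustrative sketch, not part of
+// the upstream semantic conventions. The instance name and hostname are
+// hypothetical.
+func exampleGceInstanceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GCPGceInstanceName("instance-1"),
+  GCPGceInstanceHostname("instance-1.us-west1-b.c.my-project.internal"),
+ }
+}
+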
+// The attributes used to describe telemetry in the context of LLM (Large
+// Language Models) requests and responses.
+const (
+ // GenAiCompletionKey is the attribute Key conforming to the
+ // "gen_ai.completion" semantic conventions. It represents the full
+ // response received from the LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
+ // Paris.'}]"
+ // Note: It's RECOMMENDED to format completions as a JSON string matching
+ // the [OpenAI messages
+ // format](https://platform.openai.com/docs/guides/text-generation).
+ GenAiCompletionKey = attribute.Key("gen_ai.completion")
+
+ // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
+ // semantic conventions. It represents the full prompt sent to an LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'user', 'content': 'What is the capital of
+ // France?'}]"
+ // Note: It's RECOMMENDED to format prompts as a JSON string matching the
+ // [OpenAI messages
+ // format](https://platform.openai.com/docs/guides/text-generation).
+ GenAiPromptKey = attribute.Key("gen_ai.prompt")
+
+ // GenAiRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the
+ // maximum number of tokens the LLM generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAiRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of
+ // the LLM a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4'
+ GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAiRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0.0
+ GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAiRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p
+ // sampling setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0
+ GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to
+ // each generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'stop'
+ GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAiResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'chatcmpl-123'
+ GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAiResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of
+ // the LLM a response was generated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4-0613'
+ GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as
+ // identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'openai'
+ // Note: The actual GenAI product may differ from the one identified by the
+ // client. For example, when using OpenAI client libraries to communicate
+ // with Mistral, the `gen_ai.system` is set to `openai` based on the
+ // instrumentation's best knowledge.
+ GenAiSystemKey = attribute.Key("gen_ai.system")
+
+ // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 180
+ GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
+
+ // GenAiUsagePromptTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM prompt.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
+)
+
+var (
+ // OpenAI
+ GenAiSystemOpenai = GenAiSystemKey.String("openai")
+)
+
+// GenAiCompletion returns an attribute KeyValue conforming to the
+// "gen_ai.completion" semantic conventions. It represents the full response
+// received from the LLM.
+func GenAiCompletion(val string) attribute.KeyValue {
+ return GenAiCompletionKey.String(val)
+}
+
+// GenAiPrompt returns an attribute KeyValue conforming to the
+// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
+// an LLM.
+func GenAiPrompt(val string) attribute.KeyValue {
+ return GenAiPromptKey.String(val)
+}
+
+// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the LLM generates for a request.
+func GenAiRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAiRequestMaxTokensKey.Int(val)
+}
+
+// GenAiRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// LLM a request is being made to.
+func GenAiRequestModel(val string) attribute.KeyValue {
+ return GenAiRequestModelKey.String(val)
+}
+
+// GenAiRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the LLM request.
+func GenAiRequestTemperature(val float64) attribute.KeyValue {
+ return GenAiRequestTemperatureKey.Float64(val)
+}
+
+// GenAiRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p
+// sampling setting for the LLM request.
+func GenAiRequestTopP(val float64) attribute.KeyValue {
+ return GenAiRequestTopPKey.Float64(val)
+}
+
+// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
+// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
+// array of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAiResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAiResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique
+// identifier for the completion.
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
+
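+// exampleGenAiAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions: it annotates a single LLM call, mixing the
+// enum member and the constructor helpers above. Model names and token
+// counts are hypothetical.
+func exampleGenAiAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GenAiSystemOpenai,
+  GenAiRequestModel("gpt-4"),
+  GenAiRequestMaxTokens(100),
+  GenAiResponseFinishReasons("stop"),
+  GenAiUsagePromptTokens(100),
+  GenAiUsageCompletionTokens(180),
+ }
+}
+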
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
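+// exampleGraphqlAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions: it describes one query execution. The
+// operation name and document are hypothetical.
+func exampleGraphqlAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GraphqlOperationTypeQuery,
+  GraphqlOperationName("findBookByID"),
+  GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+ }
+}
+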
+// Attributes for Heroku.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
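+// exampleHerokuAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions; the values mirror the documented examples
+// and are otherwise hypothetical.
+func exampleHerokuAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+  HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+ }
+}
+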
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+ // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+ // as hyphen-separated octets in uppercase hexadecimal form from most to
+ // least significant.
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
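+// exampleHostAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions: note that HostIP is variadic and produces a
+// string-slice attribute. All values are hypothetical.
+func exampleHostAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HostArchAMD64,
+  HostName("opentelemetry-test"),
+  HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
+ }
+}
+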
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the hTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods, not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive, SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of the request resending attempt (for any reason,
+ // including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the
+ // "http.request.size" semantic conventions. It represents the total size
+ // of the request in bytes. This should be the total number of bytes sent
+ // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+ // and HTTP/3), headers, and request body if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size
+ // of the response in bytes. This should be the total number of bytes sent
+ // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+ // HTTP/3), headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // active state
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including
+// redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of
+// the request in bytes. This should be the total number of bytes sent over the
+// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
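+// exampleHTTPAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions: it labels a completed server request with
+// the low-cardinality route template rather than the raw URI path. Values
+// are hypothetical.
+func exampleHTTPAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HTTPRequestMethodGet,
+  HTTPRoute("/users/:userID?"),
+  HTTPResponseStatusCode(200),
+  HTTPResponseBodySize(3495),
+ }
+}
+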
+// Java Virtual machine related attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+ // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+ // semantic conventions. It represents the name of the garbage collector
+ // action.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'end of minor GC', 'end of major GC'
+ // Note: Garbage collector action is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+ JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+ // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+ // semantic conventions. It represents the name of the garbage collector.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Young Generation', 'G1 Old Generation'
+ // Note: Garbage collector name is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+ JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents the whether the
+ // thread is daemon or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents the whether the
+// thread is daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
+
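+// exampleJvmAttributes is a minimal illustrative sketch, not part of the
+// upstream semantic conventions: it labels a JVM memory measurement. The
+// pool name is hypothetical.
+func exampleJvmAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  JvmMemoryTypeHeap,
+  JvmMemoryPoolName("G1 Eden space"),
+ }
+}
+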
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+ // conforming to the "k8s.container.status.last_terminated_reason" semantic
+ // conventions. It represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Evicted', 'Error'
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
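+
+// A sketch of the note above: deriving the pseudo cluster ID from the UID of
+// the `kube-system` namespace with client-go (`clientset` is assumed to be a
+// configured *kubernetes.Clientset; client-go is not a dependency of this
+// package):
+//
+//	ns, err := clientset.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	clusterUID := K8SClusterUID(string(ns.UID))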
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
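+
+// These helpers typically feed an SDK resource describing the workload; a
+// minimal sketch using go.opentelemetry.io/otel/sdk/resource and the
+// package-level SchemaURL constant, with the documented example values:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		K8SClusterName("opentelemetry-cluster"),
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//	)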
+
+// Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Attributes for a file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
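+
+// A minimal sketch combining the log-file helpers above into an attribute
+// slice (any attribute sink works; the values are the documented examples):
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//	}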
+
+// The generic attributes that may be used in any Log Record.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
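+
+// Per the note above, a ULID is one valid choice of identifier; a sketch
+// assuming the github.com/oklog/ulid/v2 module (an external dependency, not
+// used by this package):
+//
+//	uid := LogRecordUID(ulid.Make().String())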
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic,
+ // or other entity within the broker. If the broker doesn't have such a
+ // notion, the destination name SHOULD uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to
+ // the "messaging.destination.partition.id" semantic conventions. It
+ // represents the identifier of the partition messages are sent to or
+ // received from, unique within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1'
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If the broker doesn't have such a
+ // notion, the original destination name SHOULD uniquely identify the
+ // broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to either the compressed or uncompressed body size.
+ // If both sizes are known, the uncompressed body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to either the compressed or uncompressed size. If
+ // both sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack', 'nack', 'send'
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents the messaging
+ // system as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate
+ // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
+ // the instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are delivered to or processed by a consumer
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming
+// to the "messaging.destination.partition.id" semantic conventions. It
+// represents the identifier of the partition messages are sent to or received
+// from, unique within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have an auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
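+
+// A minimal sketch tying the helpers above to a publish span, assuming a
+// trace.Span `span` and a caller-provided batch size; per the note on
+// "messaging.batch.message_count", the count is set only for batch
+// operations:
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingOperationTypePublish,
+//		MessagingDestinationName("MyTopic"),
+//	)
+//	if batchSize > 1 {
+//		span.SetAttributes(MessagingBatchMessageCount(batchSize))
+//	}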
+
+// This group describes attributes specific to Apache Kafka.
+const (
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka, which are used for grouping alike messages to
+ // ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to
+// ensure they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
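+
+// A sketch of the null-key rule documented above, assuming `key` is the raw
+// []byte key of a Kafka client record and `span` is a trace.Span; the
+// attribute is set only when a key is present:
+//
+//	if key != nil {
+//		span.SetAttributes(MessagingKafkaMessageKey(string(key)))
+//	}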
+
+// This group describes attributes specific to RabbitMQ.
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+ // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+ // It represents the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 123
+ MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
+
+// This group describes attributes specific to RocketMQ.
+const (
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+ // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+ // It represents the ack deadline in seconds set for the modify ack
+ // deadline request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+ // represents the ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack_id'
+ MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+ // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+ // semantic conventions. It represents the delivery attempt for a given
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack id for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+ // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+ // conforming to the "messaging.servicebus.destination.subscription_name"
+ // semantic conventions. It represents the name of the subscription in the
+ // topic messages are received from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mySubscription'
+ MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+ // MessagingServicebusDispositionStatusKey is the attribute Key conforming
+ // to the "messaging.servicebus.disposition_status" semantic conventions.
+ // It describes the [settlement
+ // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServicebusMessageDeliveryCountKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.delivery_count" semantic
+ // conventions. It represents the number of deliveries that have been
+ // attempted for this message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.enqueued_time" semantic
+ // conventions. It represents the UTC epoch seconds at which the message
+ // has been accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+ // Message is completed
+ MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServicebusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
+}
+
+// This group describes attributes specific to Azure Event Hubs.
+const (
+ // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
+ // the "messaging.eventhubs.consumer.group" semantic conventions. It
+ // represents the name of the consumer group the event consumer is
+ // associated with.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'indexer'
+ MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
+
+ // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
+ // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
+ // It represents the UTC epoch seconds at which the message has been
+ // accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+)
+
+// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.consumer.group" semantic conventions. It
+// represents the name of the consumer group the event consumer is associated
+// with.
+func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
+ return MessagingEventhubsConsumerGroupKey.String(val)
+}
+
+// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkIoDirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network
+ // IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the
+ // "network.peer.port" semantic conventions. It represents the peer port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // application layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // actual version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.1', '2'
+ // Note: If protocol version is subject to negotiation (for example using
+ // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
+ // SHOULD be set to the negotiated version. If the actual protocol version
+ // is not known, this attribute SHOULD NOT be set.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
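+// Illustrative sketch (not part of the generated conventions), assuming the
+// standard go.opentelemetry.io/otel tracing API and a hypothetical tracer
+// named "example": per the note on "network.transport", the transport is
+// recorded alongside any port, since a port number is ambiguous on its own:
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel"
+//	)
+//
+//	func recordDial(ctx context.Context) {
+//		_, span := otel.Tracer("example").Start(ctx, "dial")
+//		defer span.End()
+//		span.SetAttributes(
+//			NetworkPeerAddress("10.1.2.80"),
+//			NetworkPeerPort(65123),
+//			NetworkTransportTCP,
+//		)
+//	}
+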
+// An OCI image manifest.
+const (
+ // OciManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of
+ // the OCI image manifest. For container images specifically, this is the
+ // digest by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, for example, the
+ // `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for example,
+// the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Attributes reserved for OpenTelemetry
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
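+// Illustrative sketch (not part of the generated conventions), assuming
+// go.opentelemetry.io/otel/codes: a non-OTLP exporter might map a span status
+// onto these attributes, honoring the rule that "otel.status_code" MUST NOT
+// be set when the status is UNSET:
+//
+//	import (
+//		"go.opentelemetry.io/otel/attribute"
+//		"go.opentelemetry.io/otel/codes"
+//	)
+//
+//	func statusAttrs(code codes.Code, desc string) []attribute.KeyValue {
+//		switch code {
+//		case codes.Ok:
+//			return []attribute.KeyValue{OTelStatusCodeOk}
+//		case codes.Error:
+//			return []attribute.KeyValue{OTelStatusCodeError, OTelStatusDescription(desc)}
+//		default: // codes.Unset: leave both attributes unset.
+//			return nil
+//		}
+//	}
+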
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string. On Windows, can be
+ // set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It represents the
+ // specifies whether the context switches for this data point were
+ // voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and
+ // time the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:25:34.853Z'
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the
+ // "process.exit.code" semantic conventions. It represents the exit code of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the
+ // "process.exit.time" semantic conventions. It represents the date and
+ // time the process exited, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:26:12.315Z'
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID
+ // of the process's group leader. This is also the process group ID (PGID)
+ // of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents the whether
+ // the process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type
+ // of page fault for this data point. Type `major` is for major/hard page
+ // faults, and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user
+ // ID (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the
+ // username of the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved
+ // user ID (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the
+ // username of the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID
+ // of the process's session leader. This is also the session ID (SID) of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessUserIDKey is the attribute Key conforming to the
+ // "process.user.id" semantic conventions. It represents the effective user
+ // ID (EUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the
+ // "process.user.name" semantic conventions. It represents the username of
+ // the effective user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+ // semantic conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily
+ // unique across all processes on the host but it is unique within the
+ // process namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+ // voluntary
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+ // major
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user
+// ID (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username
+// of the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the
+// "process.vpid" semantic conventions. It represents the virtual process
+// identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
+
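+// Illustrative sketch (not part of the generated conventions): the standard
+// os and os/user packages supply several of the process attributes above for
+// the current process. selfProcessAttrs is a hypothetical helper name:
+//
+//	import (
+//		"os"
+//		"os/user"
+//	)
+//
+//	func selfProcessAttrs() []attribute.KeyValue {
+//		attrs := []attribute.KeyValue{
+//			ProcessPID(os.Getpid()),
+//			ProcessParentPID(os.Getppid()),
+//			ProcessCommandArgs(os.Args...),
+//		}
+//		if u, err := user.Current(); err == nil {
+//			attrs = append(attrs, ProcessOwner(u.Username))
+//		}
+//		return attrs
+//	}
+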
+// Attributes for process CPU
+const (
+ // ProcessCPUStateKey is the attribute Key conforming to the
+ // "process.cpu.state" semantic conventions. It represents the CPU state of
+ // the process.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+ // system
+ ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+ // user
+ ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+ // wait
+ ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
+
+// Attributes for remote procedure calls.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the
+ // "rpc.message.type" semantic conventions. It represents the whether this
+ // is a received or sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
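+// Illustrative sketch (not part of the generated conventions), assuming
+// google.golang.org/grpc/status: the numeric gRPC code from a client call's
+// error maps directly onto "rpc.grpc.status_code":
+//
+//	import "google.golang.org/grpc/status"
+//
+//	func grpcStatusAttr(err error) attribute.KeyValue {
+//		s, _ := status.FromError(err) // yields codes.OK when err is nil
+//		return RPCGRPCStatusCodeKey.Int(int(s.Code()))
+//	}
+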
+var (
+ // sent
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string in case
+// of a `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the mUST be calculated
+// as two different counters starting from `1` one for sent messages and one
+// for received message.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
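+// Illustrative sketch (not part of the generated conventions): the two
+// counters mandated for "rpc.message.id" can be kept per call, one for each
+// direction, both starting from 1. messageCounters is a hypothetical type:
+//
+//	type messageCounters struct {
+//		sent, received int
+//	}
+//
+//	func (c *messageCounters) nextSent() attribute.KeyValue {
+//		c.sent++
+//		return RPCMessageID(c.sent)
+//	}
+//
+//	func (c *messageCounters) nextReceived() attribute.KeyValue {
+//		c.received++
+//		return RPCMessageID(c.received)
+//	}
+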
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
+// A service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time
+ // (e.g. instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random
+ // Version 1 or Version 4 [RFC
+ // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
+ // inherent unique ID as the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be
+ // used as source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the
+ // purposes of identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+ // file, the underlying
+ // data, such as pod name and namespace should be treated as confidential,
+ // being the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we
+ // do not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+	// recommended that each division (e.g. a worker
+	// thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it
+ // can't unambiguously determine the
+ // service instance that is generating that telemetry. For instance,
+	// creating a UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container
+ // within that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers,
+ // as they know the target address and
+ // port.
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fall back to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+ // `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
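+// exampleServiceResourceAttributes is an illustrative sketch, not part of
+// the generated conventions: it assembles the service identity attributes
+// that would typically be passed to an OpenTelemetry Resource. The literal
+// values are assumptions taken from the examples above.
+func exampleServiceResourceAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ServiceName("shoppingcart"), // same for all instances of the service
+		ServiceNamespace("Shop"),    // groups related services
+		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"), // unique per instance
+		ServiceVersion("2.0.0"),
+	}
+}
+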
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
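+// exampleSignalrAttributes is an illustrative sketch, not part of the
+// generated conventions: it shows that Enum-typed attributes are reported
+// through the pre-built values above rather than free-form strings.
+func exampleSignalrAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SignalrTransportWebSockets,           // signalr.transport = "web_sockets"
+		SignalrConnectionStatusNormalClosure, // signalr.connection.status = "normal_closure"
+	}
+}
+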
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating
+ // through an intermediary, `source.address` SHOULD represent the source
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Describes System attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1]
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the state of the
+ // CPU
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
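+// exampleSystemCPUAttributes is an illustrative sketch, not part of the
+// generated conventions: it builds the attribute set that might accompany a
+// per-CPU measurement, combining the logical-number helper with one of the
+// pre-built state values.
+func exampleSystemCPUAttributes(logicalCPU int) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SystemCPULogicalNumber(logicalCPU), // logical CPU number [0..n-1]
+		SystemCPUStateIdle,                 // system.cpu.state = "idle"
+	}
+}
+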
+// Describes System Memory attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
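+// exampleFilesystemAttributes is an illustrative sketch, not part of the
+// generated conventions: it combines the string-valued filesystem helpers
+// with the pre-built state and type values, as might accompany a filesystem
+// usage measurement. The device and mount point literals are assumptions.
+func exampleFilesystemAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SystemDevice("/dev/sda1"),               // device identifier (assumed)
+		SystemFilesystemMountpoint("/mnt/data"), // filesystem mount path
+		SystemFilesystemMode("rw"),
+		SystemFilesystemTypeExt4,  // system.filesystem.type = "ext4"
+		SystemFilesystemStateUsed, // system.filesystem.state = "used"
+	}
+}
+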
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+	// "system.network.state" semantic conventions. It represents the state of
+	// a network connection. A stateless protocol MUST NOT set this attribute.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of
+ // the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'parts-unlimited-java'
+ // Note: Official auto instrumentation agents and distributions SHOULD set
+ // the `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the
+ // version string of the auto instrumentation agent or distribution, if
+ // used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+	// "tls.client.not_after" semantic conventions. It represents the date/time
+	// indicating when the client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+	// "tls.client.not_before" semantic conventions. It represents the
+	// date/time indicating when the client certificate is first considered
+	// valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+	// "tls.client.server_name" semantic conventions. It represents the server
+	// name (also called SNI) indicating which hostname the client is
+	// attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+	// lowercase protocol name parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/time
+	// indicating when the server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+	// "tls.server.not_before" semantic conventions. It represents the
+	// date/time indicating when the server certificate is first considered
+	// valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
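+// exampleTLSAttributes is an illustrative sketch, not part of the generated
+// conventions: it records a negotiated connection using the helpers above.
+// The literals are assumptions; in practice they would be derived from the
+// connection state (e.g. crypto/tls's ConnectionState).
+func exampleTLSAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TLSEstablished(true),
+		TLSResumed(false),
+		TLSProtocolNameTLS,                      // tls.protocol.name = "tls"
+		TLSProtocolVersion("1.3"),               // numeric part of the version only
+		TLSCipher("TLS_AES_128_GCM_SHA256"),     // IANA cipher suite description
+		TLSClientServerName("opentelemetry.io"), // SNI sent by the client
+	}
+}
+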
+// Attributes describing URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
+ // '[1080:0:0:0:8:800:200C:417A]'
+ // Note: In some cases a URL may refer to an IP and/or port directly,
+ // without a domain name. In this case, the IP address would go to the
+ // domain field. If the URL contains a [literal IPv6
+ // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
+ // `[` and `]`, the `[` and `]` characters should also be captured in the
+ // domain field.
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from
+ // the `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+	// Note: The file extension is only set if it exists, as not every url has
+	// a file extension. When the file name has multiple extensions
+	// (`example.tar.gz`), only the last one should be captured (`gz`, not
+	// `tar.gz`).
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case username and
+ // password SHOULD be redacted and attribute's value SHOULD be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed). Sensitive content provided in `url.full` SHOULD be
+ // scrubbed when instrumentations can identify it.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original"
+ // semantic conventions. It represents the unmodified original URL as seen
+ // in the event source.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // 'search?q=OpenTelemetry'
+ // Note: In network monitoring, the observed URL may be a full URL, whereas
+ // in access logs, the URL is often just represented as a path. This field
+ // is meant to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case password and
+ // username SHOULD NOT be redacted and attribute's value SHOULD remain the
+ // same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host name
+ // under the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix, which is the last
+ // part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the host
+// name under the registered_domain. In a partially qualified domain, or if the
+// qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part
+// of the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
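+
+// Illustrative usage sketch (an addition for clarity, not generated output):
+// the URL helpers above produce attribute.KeyValue values that can be set on
+// a span via the go.opentelemetry.io/otel trace API; the span variable is
+// assumed to come from a started span.
+//
+//	span.SetAttributes(
+//		semconv.URLScheme("https"),
+//		semconv.URLFull("https://opentelemetry.io/docs?q=semconv"),
+//		semconv.URLPath("/docs"),
+//	)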
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from original. Usually refers to the browser's
+ // version
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string. In the case of using a user-agent for
+ // non-browser products, such as microservices with multiple names/versions
+ // inside the `user_agent.original`, the most significant version SHOULD be
+ // selected. In such a scenario it should align with `user_agent.name`
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
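+
+// Illustrative sketch (an addition, not generated output): in an HTTP server
+// handler, the raw User-Agent header of an incoming *http.Request can be
+// recorded with UserAgentOriginal; r and span are assumed to be in scope.
+//
+//	if ua := r.Header.Get("User-Agent"); ua != "" {
+//		span.SetAttributes(semconv.UserAgentOriginal(ua))
+//	}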
+
+// The attributes used to describe the packaged software running the
+// application code.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
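+
+// Illustrative sketch (an addition, not generated output): webengine
+// attributes describe the process as a whole, so they are usually set once on
+// the telemetry resource, assuming the go.opentelemetry.io/otel/sdk/resource
+// package.
+//
+//	res, err := resource.New(ctx, resource.WithAttributes(
+//		semconv.WebEngineName("WildFly"),
+//		semconv.WebEngineVersion("21.0.0"),
+//	))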
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
new file mode 100644
index 000000000..d031bbea7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.26.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
new file mode 100644
index 000000000..bfaee0d56
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
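+
+// Illustrative sketch (an addition, not generated output): ExceptionEventName
+// is the conventional name for the span event that records an error. A manual
+// equivalent of the SDK's RecordError could look like the following, assuming
+// the exception.* attribute keys defined elsewhere in this package.
+//
+//	span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
+//		semconv.ExceptionTypeKey.String("*os.PathError"),
+//		semconv.ExceptionMessageKey.String(err.Error()),
+//	))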
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
new file mode 100644
index 000000000..fcdb9f485
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
@@ -0,0 +1,1307 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+
+ // ContainerCPUTime is the metric conforming to the "container.cpu.time"
+ // semantic conventions. It represents the total CPU time consumed.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ContainerCPUTimeName = "container.cpu.time"
+ ContainerCPUTimeUnit = "s"
+ ContainerCPUTimeDescription = "Total CPU time consumed"
+
+ // ContainerMemoryUsage is the metric conforming to the
+ // "container.memory.usage" semantic conventions. It represents the memory
+ // usage of the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerMemoryUsageName = "container.memory.usage"
+ ContainerMemoryUsageUnit = "By"
+ ContainerMemoryUsageDescription = "Memory usage of the container."
+
+ // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
+ // conventions. It represents the disk bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerDiskIoName = "container.disk.io"
+ ContainerDiskIoUnit = "By"
+ ContainerDiskIoDescription = "Disk bytes for the container."
+
+ // ContainerNetworkIo is the metric conforming to the "container.network.io"
+ // semantic conventions. It represents the network bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerNetworkIoName = "container.network.io"
+ ContainerNetworkIoUnit = "By"
+ ContainerNetworkIoDescription = "Network bytes for the container."
+
+ // DBClientOperationDuration is the metric conforming to the
+ // "db.client.operation.duration" semantic conventions. It represents the
+ // duration of database client operations.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientOperationDurationName = "db.client.operation.duration"
+ DBClientOperationDurationUnit = "s"
+ DBClientOperationDurationDescription = "Duration of database client operations."
+
+ // DBClientConnectionCount is the metric conforming to the
+ // "db.client.connection.count" semantic conventions. It represents the number
+ // of connections that are currently in state described by the `state`
+ // attribute.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionCountName = "db.client.connection.count"
+ DBClientConnectionCountUnit = "{connection}"
+ DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+ // DBClientConnectionIdleMax is the metric conforming to the
+ // "db.client.connection.idle.max" semantic conventions. It represents the
+ // maximum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
+ DBClientConnectionIdleMaxUnit = "{connection}"
+ DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+ // DBClientConnectionIdleMin is the metric conforming to the
+ // "db.client.connection.idle.min" semantic conventions. It represents the
+ // minimum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMinName = "db.client.connection.idle.min"
+ DBClientConnectionIdleMinUnit = "{connection}"
+ DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
+
+ // DBClientConnectionMax is the metric conforming to the
+ // "db.client.connection.max" semantic conventions. It represents the maximum
+ // number of open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionMaxName = "db.client.connection.max"
+ DBClientConnectionMaxUnit = "{connection}"
+ DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
+
+ // DBClientConnectionPendingRequests is the metric conforming to the
+ // "db.client.connection.pending_requests" semantic conventions. It represents
+ // the number of pending requests for an open connection, cumulative for the
+ // entire pool.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
+ DBClientConnectionPendingRequestsUnit = "{request}"
+ DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+ // DBClientConnectionTimeouts is the metric conforming to the
+ // "db.client.connection.timeouts" semantic conventions. It represents the
+ // number of connection timeouts that have occurred trying to obtain a
+ // connection from the pool.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+ DBClientConnectionTimeoutsUnit = "{timeout}"
+ DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionCreateTime is the metric conforming to the
+ // "db.client.connection.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+ DBClientConnectionCreateTimeUnit = "s"
+ DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionWaitTime is the metric conforming to the
+ // "db.client.connection.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+ DBClientConnectionWaitTimeUnit = "s"
+ DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionUseTime is the metric conforming to the
+ // "db.client.connection.use_time" semantic conventions. It represents the time
+ // between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionUseTimeName = "db.client.connection.use_time"
+ DBClientConnectionUseTimeUnit = "s"
+ DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.count` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.min` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It represents
+ // the deprecated, use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.create_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
+ // changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a DNS
+ // lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+ // AspnetcoreRoutingMatchAttempts is the metric conforming to the
+ // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+ // number of requests that were attempted to be matched to an endpoint.
+ // Instrument: counter
+ // Unit: {match_attempt}
+ // Stability: Stable
+ AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+ AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+ AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+ // AspnetcoreDiagnosticsExceptions is the metric conforming to the
+ // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+ // number of exceptions caught by exception handling middleware.
+ // Instrument: counter
+ // Unit: {exception}
+ // Stability: Stable
+ AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+ AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+ AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
+
+ // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+ // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+ // represents the number of requests that are currently active on the server
+ // that hold a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+ AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+ AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+ // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+ // represents the duration of rate limiting lease held by requests on the
+ // server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+ AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+ AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+ // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+ // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+ // represents the time the request spent in a queue waiting to acquire a rate
+ // limiting lease.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+ AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+ AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+ // represents the number of requests that are currently queued, waiting to
+ // acquire a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+ AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+ AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+ // number of requests that tried to acquire a rate limiting lease.
+ // Instrument: counter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+ AspnetcoreRateLimitingRequestsUnit = "{request}"
+ AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+ // KestrelActiveConnections is the metric conforming to the
+ // "kestrel.active_connections" semantic conventions. It represents the number
+ // of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelActiveConnectionsName = "kestrel.active_connections"
+ KestrelActiveConnectionsUnit = "{connection}"
+ KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // KestrelConnectionDuration is the metric conforming to the
+ // "kestrel.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelConnectionDurationName = "kestrel.connection.duration"
+ KestrelConnectionDurationUnit = "s"
+ KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+ // KestrelRejectedConnections is the metric conforming to the
+ // "kestrel.rejected_connections" semantic conventions. It represents the
+ // number of connections rejected by the server.
+ // Instrument: counter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+ KestrelRejectedConnectionsUnit = "{connection}"
+ KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+ // KestrelQueuedConnections is the metric conforming to the
+ // "kestrel.queued_connections" semantic conventions. It represents the number
+ // of connections that are currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelQueuedConnectionsName = "kestrel.queued_connections"
+ KestrelQueuedConnectionsUnit = "{connection}"
+ KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+ // KestrelQueuedRequests is the metric conforming to the
+ // "kestrel.queued_requests" semantic conventions. It represents the number of
+ // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+ // currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ KestrelQueuedRequestsName = "kestrel.queued_requests"
+ KestrelQueuedRequestsUnit = "{request}"
+ KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+ // KestrelUpgradedConnections is the metric conforming to the
+ // "kestrel.upgraded_connections" semantic conventions. It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the function's logic
+ // execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the function's
+ // initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter
+ // Unit: {coldstart}
+ // Stability: Experimental
+ FaaSColdstartsName = "faas.coldstarts"
+ FaaSColdstartsUnit = "{coldstart}"
+ FaaSColdstartsDescription = "Number of invocation cold starts"
+
+ // FaaSErrors is the metric conforming to the "faas.errors" semantic
+ // conventions. It represents the number of invocation errors.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ FaaSErrorsName = "faas.errors"
+ FaaSErrorsUnit = "{error}"
+ FaaSErrorsDescription = "Number of invocation errors"
+
+ // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+ // conventions. It represents the number of successful invocations.
+ // Instrument: counter
+ // Unit: {invocation}
+ // Stability: Experimental
+ FaaSInvocationsName = "faas.invocations"
+ FaaSInvocationsUnit = "{invocation}"
+ FaaSInvocationsDescription = "Number of successful invocations"
+
+ // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+ // conventions. It represents the number of invocation timeouts.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ FaaSTimeoutsName = "faas.timeouts"
+ FaaSTimeoutsUnit = "{timeout}"
+ FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+ // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+ // conventions. It represents the distribution of max memory usage per
+ // invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSMemUsageName = "faas.mem_usage"
+ FaaSMemUsageUnit = "By"
+ FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+ // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+ // conventions. It represents the distribution of CPU usage per invocation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSCPUUsageName = "faas.cpu_usage"
+ FaaSCPUUsageUnit = "s"
+ FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+ // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+ // conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSNetIoName = "faas.net_io"
+ FaaSNetIoUnit = "By"
+ FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+ // HTTPServerRequestDuration is the metric conforming to the
+ // "http.server.request.duration" semantic conventions. It represents the
+ // duration of HTTP server requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPServerRequestDurationName = "http.server.request.duration"
+ HTTPServerRequestDurationUnit = "s"
+ HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
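+ // Illustrative sketch (an addition, not generated output): each Name/Unit/
+ // Description triple in this block is intended to be passed to an instrument
+ // constructor from the go.opentelemetry.io/otel/metric API, e.g.:
+ //
+ //	dur, err := meter.Float64Histogram(
+ //		semconv.HTTPServerRequestDurationName,
+ //		metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
+ //		metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
+ //	)
+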
+ // HTTPServerActiveRequests is the metric conforming to the
+ // "http.server.active_requests" semantic conventions. It represents the number
+ // of active HTTP server requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPServerActiveRequestsName = "http.server.active_requests"
+ HTTPServerActiveRequestsUnit = "{request}"
+ HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+ // HTTPServerRequestBodySize is the metric conforming to the
+ // "http.server.request.body.size" semantic conventions. It represents the size
+ // of HTTP server request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerRequestBodySizeName = "http.server.request.body.size"
+ HTTPServerRequestBodySizeUnit = "By"
+ HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+ // HTTPServerResponseBodySize is the metric conforming to the
+ // "http.server.response.body.size" semantic conventions. It represents the
+ // size of HTTP server response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerResponseBodySizeName = "http.server.response.body.size"
+ HTTPServerResponseBodySizeUnit = "By"
+ HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+ // HTTPClientRequestDuration is the metric conforming to the
+ // "http.client.request.duration" semantic conventions. It represents the
+ // duration of HTTP client requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPClientRequestDurationName = "http.client.request.duration"
+ HTTPClientRequestDurationUnit = "s"
+ HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+ // HTTPClientRequestBodySize is the metric conforming to the
+ // "http.client.request.body.size" semantic conventions. It represents the size
+ // of HTTP client request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientRequestBodySizeName = "http.client.request.body.size"
+ HTTPClientRequestBodySizeUnit = "By"
+ HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+ // HTTPClientResponseBodySize is the metric conforming to the
+ // "http.client.response.body.size" semantic conventions. It represents the
+ // size of HTTP client response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientResponseBodySizeName = "http.client.response.body.size"
+ HTTPClientResponseBodySizeUnit = "By"
+ HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+ // HTTPClientOpenConnections is the metric conforming to the
+ // "http.client.open_connections" semantic conventions. It represents the
+ // number of outbound HTTP connections that are currently active or idle on the
+ // client.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ HTTPClientOpenConnectionsName = "http.client.open_connections"
+ HTTPClientOpenConnectionsUnit = "{connection}"
+ HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+ // HTTPClientConnectionDuration is the metric conforming to the
+ // "http.client.connection.duration" semantic conventions. It represents the
+ // duration of the successfully established outbound HTTP connections.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientConnectionDurationName = "http.client.connection.duration"
+ HTTPClientConnectionDurationUnit = "s"
+ HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+ // HTTPClientActiveRequests is the metric conforming to the
+ // "http.client.active_requests" semantic conventions. It represents the number
+ // of active HTTP requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPClientActiveRequestsName = "http.client.active_requests"
+ HTTPClientActiveRequestsUnit = "{request}"
+ HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+ // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+ // conventions. It represents the measure of initial memory requested.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmMemoryInitName = "jvm.memory.init"
+ JvmMemoryInitUnit = "By"
+ JvmMemoryInitDescription = "Measure of initial memory requested."
+
+ // JvmSystemCPUUtilization is the metric conforming to the
+ // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+ // CPU utilization for the whole system as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+ JvmSystemCPUUtilizationUnit = "1"
+ JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+ // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+ // semantic conventions. It represents the average CPU load of the whole system
+ // for the last minute as reported by the JVM.
+ // Instrument: gauge
+ // Unit: {run_queue_item}
+ // Stability: Experimental
+ JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+ JvmSystemCPULoad1mUnit = "{run_queue_item}"
+ JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+ // JvmBufferMemoryUsage is the metric conforming to the
+ // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+ // memory used by buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+ JvmBufferMemoryUsageUnit = "By"
+ JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+ // JvmBufferMemoryLimit is the metric conforming to the
+ // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+ // total memory capacity of buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+ JvmBufferMemoryLimitUnit = "By"
+ JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+ // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+ // conventions. It represents the number of buffers in the pool.
+ // Instrument: updowncounter
+ // Unit: {buffer}
+ // Stability: Experimental
+ JvmBufferCountName = "jvm.buffer.count"
+ JvmBufferCountUnit = "{buffer}"
+ JvmBufferCountDescription = "Number of buffers in the pool."
+
+ // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+ // conventions. It represents the measure of memory used.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedName = "jvm.memory.used"
+ JvmMemoryUsedUnit = "By"
+ JvmMemoryUsedDescription = "Measure of memory used."
+
+ // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+ // semantic conventions. It represents the measure of memory committed.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryCommittedName = "jvm.memory.committed"
+ JvmMemoryCommittedUnit = "By"
+ JvmMemoryCommittedDescription = "Measure of memory committed."
+
+ // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+ // conventions. It represents the measure of max obtainable memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
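+ // Illustrative sketch (an addition, not generated output): gauge-style
+ // metrics such as jvm.cpu.recent_utilization above map to an observable
+ // gauge in the go.opentelemetry.io/otel/metric API:
+ //
+ //	g, err := meter.Float64ObservableGauge(
+ //		semconv.JvmCPURecentUtilizationName,
+ //		metric.WithUnit(semconv.JvmCPURecentUtilizationUnit),
+ //		metric.WithDescription(semconv.JvmCPURecentUtilizationDescription),
+ //	)
+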
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+ // measures the duration of publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+ // measures the duration of receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingProcessDuration is the metric conforming to the
+ // "messaging.process.duration" semantic conventions. It represents the
+ // measures the duration of process operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingProcessDurationName = "messaging.process.duration"
+ MessagingProcessDurationUnit = "s"
+ MessagingProcessDurationDescription = "Measures the duration of process operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+ // measures the number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+ // measures the number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingProcessMessages is the metric conforming to the
+ // "messaging.process.messages" semantic conventions. It represents the
+ // measures the number of processed messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingProcessMessagesName = "messaging.process.messages"
+ MessagingProcessMessagesUnit = "{message}"
+ MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+ // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+ // conventions. It represents the total CPU seconds broken down by different
+ // states.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ProcessCPUTimeName = "process.cpu.time"
+ ProcessCPUTimeUnit = "s"
+ ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+ // ProcessCPUUtilization is the metric conforming to the
+ // "process.cpu.utilization" semantic conventions. It represents the difference
+ // in process.cpu.time since the last measurement, divided by the elapsed time
+ // and number of CPUs available to the process.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ ProcessCPUUtilizationName = "process.cpu.utilization"
+ ProcessCPUUtilizationUnit = "1"
+ ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
+
+ // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
+ // semantic conventions. It represents the amount of physical memory in use.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryUsageName = "process.memory.usage"
+ ProcessMemoryUsageUnit = "By"
+ ProcessMemoryUsageDescription = "The amount of physical memory in use."
+
+ // ProcessMemoryVirtual is the metric conforming to the
+ // "process.memory.virtual" semantic conventions. It represents the amount of
+ // committed virtual memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryVirtualName = "process.memory.virtual"
+ ProcessMemoryVirtualUnit = "By"
+ ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
+
+ // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
+ // conventions. It represents the disk bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessDiskIoName = "process.disk.io"
+ ProcessDiskIoUnit = "By"
+ ProcessDiskIoDescription = "Disk bytes transferred."
+
+ // ProcessNetworkIo is the metric conforming to the "process.network.io"
+ // semantic conventions. It represents the network bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessNetworkIoName = "process.network.io"
+ ProcessNetworkIoUnit = "By"
+ ProcessNetworkIoDescription = "Network bytes transferred."
+
+ // ProcessThreadCount is the metric conforming to the "process.thread.count"
+ // semantic conventions. It represents the process threads count.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Experimental
+ ProcessThreadCountName = "process.thread.count"
+ ProcessThreadCountUnit = "{thread}"
+ ProcessThreadCountDescription = "Process threads count."
+
+ // ProcessOpenFileDescriptorCount is the metric conforming to the
+ // "process.open_file_descriptor.count" semantic conventions. It represents the
+ // number of file descriptors in use by the process.
+ // Instrument: updowncounter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
+ ProcessOpenFileDescriptorCountUnit = "{count}"
+ ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
+
+ // ProcessContextSwitches is the metric conforming to the
+ // "process.context_switches" semantic conventions. It represents the number of
+ // times the process has been context switched.
+ // Instrument: counter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessContextSwitchesName = "process.context_switches"
+ ProcessContextSwitchesUnit = "{count}"
+ ProcessContextSwitchesDescription = "Number of times the process has been context switched."
+
+ // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
+ // semantic conventions. It represents the number of page faults the process
+ // has made.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ ProcessPagingFaultsName = "process.paging.faults"
+ ProcessPagingFaultsUnit = "{fault}"
+ ProcessPagingFaultsDescription = "Number of page faults the process has made."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It measures the duration of inbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It measures the duration of outbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It represents the reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It represents the reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared"
+ // semantic conventions. It represents the shared memory used (mostly by
+ // tmpfs).
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemorySharedName = "system.memory.shared"
+ SystemMemorySharedUnit = "By"
+ SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the unix swap or windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskOperationTimeName = "system.disk.operation_time"
+ SystemDiskOperationTimeUnit = "s"
+ SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
+
+ // SystemDiskMerged is the metric conforming to the "system.disk.merged"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskMergedName = "system.disk.merged"
+ SystemDiskMergedUnit = "{operation}"
+
+ // SystemFilesystemUsage is the metric conforming to the
+ // "system.filesystem.usage" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUsageName = "system.filesystem.usage"
+ SystemFilesystemUsageUnit = "By"
+
+ // SystemFilesystemUtilization is the metric conforming to the
+ // "system.filesystem.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUtilizationName = "system.filesystem.utilization"
+ SystemFilesystemUtilizationUnit = "1"
+
+ // SystemNetworkDropped is the metric conforming to the
+ // "system.network.dropped" semantic conventions. It represents the count of
+ // packets that are dropped or discarded even though there was no error.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ SystemNetworkDroppedName = "system.network.dropped"
+ SystemNetworkDroppedUnit = "{packet}"
+ SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
+
+ // SystemNetworkPackets is the metric conforming to the
+ // "system.network.packets" semantic conventions.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkPacketsName = "system.network.packets"
+ SystemNetworkPacketsUnit = "{packet}"
+
+ // SystemNetworkErrors is the metric conforming to the "system.network.errors"
+ // semantic conventions. It represents the count of network errors detected.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ SystemNetworkErrorsName = "system.network.errors"
+ SystemNetworkErrorsUnit = "{error}"
+ SystemNetworkErrorsDescription = "Count of network errors detected"
+
+ // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkIoName = "system.network.io"
+ SystemNetworkIoUnit = "By"
+
+ // SystemNetworkConnections is the metric conforming to the
+ // "system.network.connections" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessCount is the metric conforming to the "system.process.count"
+ // semantic conventions. It represents the total number of processes in each
+ // state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCountName = "system.process.count"
+ SystemProcessCountUnit = "{process}"
+ SystemProcessCountDescription = "Total number of processes in each state"
+
+ // SystemProcessCreated is the metric conforming to the
+ // "system.process.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCreatedName = "system.process.created"
+ SystemProcessCreatedUnit = "{process}"
+ SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
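The generated `*Name`/`*Unit`/`*Description` triples above are intended to be passed to instrument constructors so registrations stay aligned with the conventions. A minimal sketch, assuming a MeterProvider has been configured elsewhere (the meter name and the recorded value are illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Obtain a Meter from the globally registered MeterProvider
	// (assumed to be configured elsewhere).
	meter := otel.Meter("example/semconv-metrics")

	// Register the instrument with the generated constants so the name,
	// unit, and description match the semantic conventions.
	procCount, err := meter.Int64UpDownCounter(
		semconv.SystemProcessCountName,
		metric.WithUnit(semconv.SystemProcessCountUnit),
		metric.WithDescription(semconv.SystemProcessCountDescription),
	)
	if err != nil {
		panic(err)
	}

	// Illustrative measurement; a real collector would also attach the
	// process-state attribute the convention calls for.
	procCount.Add(context.Background(), 42)
}
```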
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
new file mode 100644
index 000000000..4c87c7adc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
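The schema URL is typically attached to a `Resource` so consumers can apply schema transformations between semconv versions. A short sketch using the SDK's `resource.NewWithAttributes` (the service name is a placeholder):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Declaring the schema URL alongside the attributes records which
	// semantic-conventions version the attribute keys come from.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("checkout"), // placeholder service name
	)
	fmt.Println(res.SchemaURL()) // https://opentelemetry.io/schemas/1.26.0
}
```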
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
new file mode 100644
index 000000000..02b56115e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
@@ -0,0 +1,4 @@
+
+# Migration from v1.33.0 to v1.34.0
+
+The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`.
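Since the package is a drop-in replacement, migrating amounts to an import-path swap; a minimal sketch (printing `SchemaURL` only makes the snippet runnable):

```go
package main

// Before: import semconv "go.opentelemetry.io/otel/semconv/v1.33.0"
// After: only the import path changes; the exported identifiers stay the same.
import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	fmt.Println(semconv.SchemaURL) // https://opentelemetry.io/schemas/1.34.0
}
```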
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
new file mode 100644
index 000000000..fab06c975
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.34.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
new file mode 100644
index 000000000..5b5666257
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
@@ -0,0 +1,13851 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+ // AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "created"
+ // Note: The Android lifecycle states are defined in
+ // [Activity lifecycle callbacks], from which the `OS identifiers` are
+ // derived.
+ //
+ // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+ AndroidAppStateKey = attribute.Key("android.app.state")
+
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version (`os.version`) of
+ // the android operating system. More information can be found [here].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "33", "32"
+ //
+ // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found [here].
+//
+// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// Enum values for android.app.state
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called in the app for the first time.
+ //
+ // Stability: development
+ AndroidAppStateCreated = AndroidAppStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity,
+ // Context.stopService() has been called when the app was in the foreground
+ // state.
+ //
+ // Stability: development
+ AndroidAppStateBackground = AndroidAppStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called when the app was in either the created
+ // or background states.
+ //
+ // Stability: development
+ AndroidAppStateForeground = AndroidAppStateKey.String("foreground")
+)
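Both the helper constructor and the predefined enum members above produce ordinary `attribute.KeyValue` values, so they compose with the rest of the attribute API. A small sketch (the API level value is illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	// The constructor and the enum member both yield attribute.KeyValue.
	attrs := []attribute.KeyValue{
		semconv.AndroidOSAPILevel("33"), // illustrative API level
		semconv.AndroidAppStateForeground,
	}

	// attribute.NewSet sorts and de-duplicates, as the SDK does when the
	// attributes are recorded on a span or measurement.
	set := attribute.NewSet(attrs...)
	fmt.Println(set.Encoded(attribute.DefaultEncoder()))
}
```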
+
+// Namespace: app
+const (
+ // AppInstallationIDKey is the attribute Key conforming to the
+ // "app.installation.id" semantic conventions. It represents a unique identifier
+ // representing the installation of an application on a specific device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092"
+ // Note: Its value SHOULD persist across launches of the same application
+ // installation, including through application upgrades.
+ // It SHOULD change if the application is uninstalled or if all applications of
+ // the vendor are uninstalled.
+ // Additionally, users might be able to reset this value (e.g. by clearing
+ // application data).
+ // If an app is installed multiple times on the same device (e.g. in different
+ // accounts on Android), each `app.installation.id` SHOULD have a different
+ // value.
+ // If multiple OpenTelemetry SDKs are used within the same application, they
+ // SHOULD use the same value for `app.installation.id`.
+ // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the
+ // `app.installation.id`.
+ //
+ // For iOS, this value SHOULD be equal to the [vendor identifier].
+ //
+ // For Android, examples of `app.installation.id` implementations include:
+ //
+ // - [Firebase Installation ID].
+ // - A globally unique UUID which is persisted across sessions in your
+ // application.
+ // - [App set ID].
+ // - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
+ //
+ // More information about Android identifier best practices can be found
+ // [here].
+ //
+ // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
+ // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
+ // [App set ID]: https://developer.android.com/identity/app-set-id
+ // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
+ // [here]: https://developer.android.com/training/articles/user-data-ids
+ AppInstallationIDKey = attribute.Key("app.installation.id")
+
+ // AppScreenCoordinateXKey is the attribute Key conforming to the
+ // "app.screen.coordinate.x" semantic conventions. It represents the x
+ // (horizontal) coordinate of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 131
+ AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x")
+
+ // AppScreenCoordinateYKey is the attribute Key conforming to the
+ // "app.screen.coordinate.y" semantic conventions. It represents the y
+ // (vertical) component of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12, 99
+ AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y")
+
+ // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id"
+ // semantic conventions. It represents an identifier that uniquely
+ // differentiates this widget from other widgets in the same application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetIDKey = attribute.Key("app.widget.id")
+
+ // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name"
+ // semantic conventions. It represents the name of an application widget.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "submit", "attack", "Clear Cart"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetNameKey = attribute.Key("app.widget.name")
+)
+
+// AppInstallationID returns an attribute KeyValue conforming to the
+// "app.installation.id" semantic conventions. It represents a unique identifier
+// representing the installation of an application on a specific device.
+func AppInstallationID(val string) attribute.KeyValue {
+ return AppInstallationIDKey.String(val)
+}
+
+// AppScreenCoordinateX returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.x" semantic conventions. It represents the x
+// (horizontal) coordinate of a screen coordinate, in screen pixels.
+func AppScreenCoordinateX(val int) attribute.KeyValue {
+ return AppScreenCoordinateXKey.Int(val)
+}
+
+// AppScreenCoordinateY returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical)
+// component of a screen coordinate, in screen pixels.
+func AppScreenCoordinateY(val int) attribute.KeyValue {
+ return AppScreenCoordinateYKey.Int(val)
+}
+
+// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id"
+// semantic conventions. It represents an identifier that uniquely differentiates
+// this widget from other widgets in the same application.
+func AppWidgetID(val string) attribute.KeyValue {
+ return AppWidgetIDKey.String(val)
+}
+
+// AppWidgetName returns an attribute KeyValue conforming to the
+// "app.widget.name" semantic conventions. It represents the name of an
+// application widget.
+func AppWidgetName(val string) attribute.KeyValue {
+ return AppWidgetNameKey.String(val)
+}
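These `app.*` helpers are plain attribute constructors; one plausible use is annotating a span event for a UI interaction. A hedged sketch, assuming a configured TracerProvider, with a hypothetical handler name and illustrative widget values:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

// recordClick is a hypothetical handler showing how the app.* attributes
// might annotate a UI interaction.
func recordClick(ctx context.Context, x, y int) {
	tracer := otel.Tracer("example/app-events")
	_, span := tracer.Start(ctx, "widget.click")
	defer span.End()

	span.AddEvent("click", trace.WithAttributes(
		semconv.AppScreenCoordinateX(x),
		semconv.AppScreenCoordinateY(y),
		semconv.AppWidgetName("submit"),          // illustrative widget name
		semconv.AppWidgetID("submit_order_1829"), // illustrative widget id
	))
}

func main() {
	recordClick(context.Background(), 131, 99)
}
```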
+
+// Namespace: artifact
+const (
+ // ArtifactAttestationFilenameKey is the attribute Key conforming to the
+ // "artifact.attestation.filename" semantic conventions. It represents the
+ // provenance filename of the built attestation which directly relates to the
+ // build artifact filename. This filename SHOULD accompany the artifact at
+ // publish time. See the [SLSA Relationship] specification for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0.attestation",
+ // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation",
+ // "file-name-package.tar.gz.intoto.json1"
+ //
+ // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+ ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename")
+
+ // ArtifactAttestationHashKey is the attribute Key conforming to the
+ // "artifact.attestation.hash" semantic conventions. It represents the full
+ // [hash value (see glossary)], of the built attestation. Some envelopes in the
+ // [software attestation space] also refer to this as the **digest**.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408"
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+ ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash")
+
+ // ArtifactAttestationIDKey is the attribute Key conforming to the
+ // "artifact.attestation.id" semantic conventions. It represents the id of the
+ // build [software attestation].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ //
+ // [software attestation]: https://slsa.dev/attestation-model
+ ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id")
+
+ // ArtifactFilenameKey is the attribute Key conforming to the
+ // "artifact.filename" semantic conventions. It represents the human readable
+ // file name of the artifact, typically generated during build and release
+ // processes. Often includes the package name and version in the file name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0",
+ // "release-1.tar.gz", "file-name-package.tar.gz"
+ // Note: This file name can also act as the [Package Name]
+ // in cases where the package ecosystem maps accordingly.
+ // Additionally, the artifact [can be published]
+ // for others, but that is not a guarantee.
+ //
+ // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model
+ // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain
+ ArtifactFilenameKey = attribute.Key("artifact.filename")
+
+ // ArtifactHashKey is the attribute Key conforming to the "artifact.hash"
+ // semantic conventions. It represents the full [hash value (see glossary)],
+ // often found in checksum.txt on a release of the artifact and used to verify
+ // package integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"
+ // Note: The specific algorithm used to create the cryptographic hash value is
+ // not defined. In situations where an artifact has multiple
+ // cryptographic hashes, it is up to the implementer to choose which
+ // hash value to set here; this should be the most secure hash algorithm
+ // that is suitable for the situation and consistent with the
+ // corresponding attestation. The implementer can then provide the other
+ // hash values through an additional set of attribute extensions as they
+ // deem necessary.
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ ArtifactHashKey = attribute.Key("artifact.hash")
+
+ // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl"
+ // semantic conventions. It represents the [Package URL] of the
+ // [package artifact], which provides a standard way to identify and locate
+ // the packaged artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pkg:github/package-url/purl-spec@1209109710924",
+ // "pkg:npm/foo@12.12.3"
+ //
+ // [Package URL]: https://github.com/package-url/purl-spec
+ // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+ ArtifactPurlKey = attribute.Key("artifact.purl")
+
+ // ArtifactVersionKey is the attribute Key conforming to the "artifact.version"
+ // semantic conventions. It represents the version of the artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v0.1.0", "1.2.1", "122691-build"
+ ArtifactVersionKey = attribute.Key("artifact.version")
+)
+
+// ArtifactAttestationFilename returns an attribute KeyValue conforming to the
+// "artifact.attestation.filename" semantic conventions. It represents the
+// provenance filename of the built attestation which directly relates to the
+// build artifact filename. This filename SHOULD accompany the artifact at
+// publish time. See the [SLSA Relationship] specification for more information.
+//
+// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+func ArtifactAttestationFilename(val string) attribute.KeyValue {
+ return ArtifactAttestationFilenameKey.String(val)
+}
+
+// ArtifactAttestationHash returns an attribute KeyValue conforming to the
+// "artifact.attestation.hash" semantic conventions. It represents the full
+// [hash value (see glossary)], of the built attestation. Some envelopes in the
+// [software attestation space] also refer to this as the **digest**.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+func ArtifactAttestationHash(val string) attribute.KeyValue {
+ return ArtifactAttestationHashKey.String(val)
+}
+
+// ArtifactAttestationID returns an attribute KeyValue conforming to the
+// "artifact.attestation.id" semantic conventions. It represents the id of the
+// build [software attestation].
+//
+// [software attestation]: https://slsa.dev/attestation-model
+func ArtifactAttestationID(val string) attribute.KeyValue {
+ return ArtifactAttestationIDKey.String(val)
+}
+
+// ArtifactFilename returns an attribute KeyValue conforming to the
+// "artifact.filename" semantic conventions. It represents the human readable
+// file name of the artifact, typically generated during build and release
+// processes. Often includes the package name and version in the file name.
+func ArtifactFilename(val string) attribute.KeyValue {
+ return ArtifactFilenameKey.String(val)
+}
+
+// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash"
+// semantic conventions. It represents the full [hash value (see glossary)],
+// often found in checksum.txt on a release of the artifact and used to verify
+// package integrity.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+func ArtifactHash(val string) attribute.KeyValue {
+ return ArtifactHashKey.String(val)
+}
+
+// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl"
+// semantic conventions. It represents the [Package URL] of the
+// [package artifact], which provides a standard way to identify and locate the
+// packaged artifact.
+//
+// [Package URL]: https://github.com/package-url/purl-spec
+// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+func ArtifactPurl(val string) attribute.KeyValue {
+ return ArtifactPurlKey.String(val)
+}
+
+// ArtifactVersion returns an attribute KeyValue conforming to the
+// "artifact.version" semantic conventions. It represents the version of the
+// artifact.
+func ArtifactVersion(val string) attribute.KeyValue {
+ return ArtifactVersionKey.String(val)
+}
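The `artifact.*` helpers attach build-artifact metadata to telemetry, for example on a span emitted by a release pipeline. A sketch with values drawn mostly from the documented examples (the purl is hypothetical), assuming a configured TracerProvider:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	tracer := otel.Tracer("example/build-pipeline")
	_, span := tracer.Start(context.Background(), "publish-artifact")
	defer span.End()

	// Values are illustrative; a real pipeline would take them from the
	// build system and its attestation tooling.
	span.SetAttributes(
		semconv.ArtifactFilename("golang-binary-amd64-v0.1.0"),
		semconv.ArtifactVersion("v0.1.0"),
		semconv.ArtifactHash("9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"),
		semconv.ArtifactPurl("pkg:golang/example.com/app@v0.1.0"), // hypothetical purl
	)
}
```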
+
+// Namespace: aws
+const (
+ // AWSBedrockGuardrailIDKey is the attribute Key conforming to the
+ // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+ // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+ // prevent unwanted behavior from model responses or user messages.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "sgi5gkybzqak"
+ //
+ // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+ AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id")
+
+ // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the
+ // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the
+ // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a
+ // bank of information that can be queried by models to generate more relevant
+ // responses and augment prompts.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "XFWUPB9PAW"
+ //
+ // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+ AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id")
+
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the
+ // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "AttributeName": "string", "AttributeType": "string" }"
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lives", "id"
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+ // of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }"
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of the
+ // `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+ // value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "CatsTable"
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }"
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `GlobalSecondaryIndexes`
+ // request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }"
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value of
+ // the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "name_to_group"
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the
+ // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+ // the JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }"
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of the
+ // `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }"
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value of
+ // the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems,
+ // ProductReviews"
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+ // parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+ // the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+ // the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of the
+ // `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of the
+ // `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ALL_ATTRIBUTES", "COUNT"
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the number of
+ // items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+ // the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "Cats"
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the value
+ // of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS cluster].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ //
+ // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container instance].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"
+ //
+ // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch type]
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn"
+ // semantic conventions. It represents the ARN of a running [ECS task].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ //
+ // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family name of
+ // the [ECS task definition] used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-family"
+ //
+ // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID MUST
+ // be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision for
+ // the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "8", "26"
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+
+ // AWSExtendedRequestIDKey is the attribute Key conforming to the
+ // "aws.extended_request_id" semantic conventions. It represents the AWS
+ // extended request ID as returned in the response header `x-amz-id-2`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+ AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+ // AWSKinesisStreamNameKey is the attribute Key conforming to the
+ // "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the
+ // `--stream-name` parameter of the Kinesis [describe-stream] operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-stream-name"
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+ AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (the
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+ // Note: This may be different from `cloud.resource_id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+ // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+ // Lambda function. Its contents are read by Lambda and used to trigger a
+ // function. This isn't available in the Lambda execution context or the
+ // Lambda runtime environment. It is populated by the AWS SDK for each
+ // language when that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+ AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+ // Note: See the [log group ARN format documentation].
+ //
+ // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of the
+ // AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/aws/lambda/my-function", "opentelemetry-service"
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each
+ // writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+ // AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ // Note: See the [log stream ARN format documentation]. One log group can
+ // contain several log streams, so these ARNs necessarily identify both a log
+ // group and a log stream.
+ //
+ // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+ // AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in the
+ // response headers `x-amzn-requestid`, `x-amzn-request-id` or
+ // `x-amz-request-id`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request refers to.
+ // Corresponds to the `--bucket` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-bucket-name"
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source object
+ // (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [upload-part-copy]
+ //
+ //
+ // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean"
+ // Note: The `delete` attribute is only applicable to the [delete-object]
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3 API].
+ //
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `key` attribute is applicable to all object-related S3 operations,
+ // i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [delete-object]
+ // - [get-object]
+ // - [head-object]
+ // - [put-object]
+ // - [restore-object]
+ // - [select-object-content]
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [create-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html
+ // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html
+ // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html
+ // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html
+ // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number of
+ // the part being uploaded in a multipart-upload operation. This is a positive
+ // integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the [upload-part]
+ // and [upload-part-copy] operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter of
+ // the
+ // [upload-part operation within the S3 API].
+ //
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id"
+ // semantic conventions. It represents the upload ID that identifies the
+ // multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"
+ // Note: The `upload_id` attribute applies to S3 multipart-upload operations and
+ // corresponds to the `--upload-id` parameter
+ // of the [S3 API] multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the Secret stored in the Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+ //
+ // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+ AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+ // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+ // semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+ // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+ // used to access the queue and perform actions on it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+ AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+ // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+ // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+ // of the AWS Step Functions Activity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+ AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+ // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+ // "aws.step_functions.state_machine.arn" semantic conventions. It represents
+ // the ARN of the AWS Step Functions State Machine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+ AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+// prevent unwanted behavior from model responses or user messages.
+//
+// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+func AWSBedrockGuardrailID(val string) attribute.KeyValue {
+ return AWSBedrockGuardrailIDKey.String(val)
+}
+
+// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the
+// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of
+// information that can be queried by models to generate more relevant responses
+// and augment prompts.
+//
+// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue {
+ return AWSBedrockKnowledgeBaseIDKey.String(val)
+}
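+
+// Usage sketch (illustrative, not part of the generated conventions): the two
+// Bedrock helpers above produce span attributes. The span variable and the
+// identifier values are assumptions for the example:
+//
+//	span.SetAttributes(
+//		AWSBedrockGuardrailID("sgi5gkybzqak"),
+//		AWSBedrockKnowledgeBaseID("XFWUPB9PAW"),
+//	)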
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents
+// the JSON-serialized value of each item in the `AttributeDefinitions` request
+// field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the
+// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value
+// of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the
+// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+// value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of the
+// `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to
+// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+// the JSON-serialized value of the `ItemCollectionMetrics` response field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+// field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of the
+// `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming
+// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+// the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the
+// `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value of
+// the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
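+
+// Usage sketch (illustrative; assumes the go.opentelemetry.io/otel trace API
+// and hypothetical tracer, span, and table names). A DynamoDB Query call could
+// be annotated with the helpers above like so:
+//
+//	ctx, span := otel.Tracer("dynamodb-client").Start(ctx, "DynamoDB.Query")
+//	defer span.End()
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBLimit(10),
+//	)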
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+// [ECS cluster].
+//
+// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container instance].
+//
+// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS task].
+//
+// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task definition] used to create the ECS task.
+//
+// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id"
+// semantic conventions. It represents the ID of a running ECS task. The ID MUST
+// be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+ return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+// lambda function. Its contents are read by Lambda and used to trigger a
+// function. This isn't available in the lambda execution context or the lambda
+// runtime environment. This is going to be populated by the AWS SDK for each
+// language when that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
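+
+// Usage sketch (illustrative): combining the S3 helpers above on a single
+// client span; the values mirror the examples in the attribute documentation
+// and the span variable is assumed:
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3PartNumber(3456),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//	)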
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the Secret stored in the Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS
+// Topic. An Amazon SNS [topic] is a logical access point that acts as a
+// communication channel.
+//
+// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+func AWSSNSTopicARN(val string) attribute.KeyValue {
+ return AWSSNSTopicARNKey.String(val)
+}
+
+// AWSSQSQueueURL returns an attribute KeyValue conforming to the
+// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS
+// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service
+// (SQS) and is used to access the queue and perform actions on it.
+func AWSSQSQueueURL(val string) attribute.KeyValue {
+ return AWSSQSQueueURLKey.String(val)
+}
+
+// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the
+// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+// of the AWS Step Functions Activity.
+func AWSStepFunctionsActivityARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsActivityARNKey.String(val)
+}
+
+// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to
+// the "aws.step_functions.state_machine.arn" semantic conventions. It represents
+// the ARN of the AWS Step Functions State Machine.
+func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsStateMachineARNKey.String(val)
+}
+
+// Enum values for aws.ecs.launchtype
+var (
+ // ec2
+ // Stability: development
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ // Stability: development
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
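+
+// Usage sketch (illustrative; assumes the go.opentelemetry.io/otel/sdk/resource
+// package and that this package exports a SchemaURL constant; the ARN is a
+// made-up value). ECS attributes typically describe the emitting resource
+// rather than an individual span:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789012:cluster/my-cluster"),
+//		AWSECSLaunchtypeFargate,
+//	)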
+
+// Namespace: az
+const (
+ // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic
+ // conventions. It represents the [Azure Resource Provider Namespace] as
+ // recognized by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
+ //
+ // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+ AzNamespaceKey = attribute.Key("az.namespace")
+
+ // AzServiceRequestIDKey is the attribute Key conforming to the
+ // "az.service_request_id" semantic conventions. It represents the unique
+ // identifier of the service request. It's generated by the Azure service and
+ // returned with the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00000000-0000-0000-0000-000000000000"
+ AzServiceRequestIDKey = attribute.Key("az.service_request_id")
+)
+
+// AzNamespace returns an attribute KeyValue conforming to the "az.namespace"
+// semantic conventions. It represents the [Azure Resource Provider Namespace] as
+// recognized by the client.
+//
+// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+func AzNamespace(val string) attribute.KeyValue {
+ return AzNamespaceKey.String(val)
+}
+
+// AzServiceRequestID returns an attribute KeyValue conforming to the
+// "az.service_request_id" semantic conventions. It represents the unique
+// identifier of the service request. It's generated by the Azure service and
+// returned with the response.
+func AzServiceRequestID(val string) attribute.KeyValue {
+ return AzServiceRequestIDKey.String(val)
+}
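+
+// Usage sketch (illustrative; the span variable is assumed and the values are
+// taken from the attribute documentation above):
+//
+//	span.SetAttributes(
+//		AzNamespace("Microsoft.Storage"),
+//		AzServiceRequestID("00000000-0000-0000-0000-000000000000"),
+//	)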
+
+// Namespace: azure
+const (
+ // AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
+ // semantic conventions. It represents the unique identifier of the client
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1"
+ AzureClientIDKey = attribute.Key("azure.client.id")
+
+ // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.connection.mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode")
+
+ // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the
+ // "azure.cosmosdb.consistency.level" semantic conventions. It represents the
+ // account or request [consistency level].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong",
+ // "Session"
+ //
+ // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+ AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level")
+
+ // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to
+ // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It
+ // represents the list of regions contacted during operation in the order that
+ // they were contacted. If there is more than one region listed, it indicates
+ // that the operation was performed on multiple regions, i.e. a cross-regional
+ // call.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "North Central US", "Australia East", "Australia Southeast"
+ // Note: Region name matches the format of `displayName` in [Azure Location API]
+ //
+ // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location
+ AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions")
+
+ // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents
+ // the number of request units consumed by the operation.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 46.18, 1.0
+ AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge")
+
+ // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+ // request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size")
+
+ // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents
+ // the cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000, 1002
+ AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code")
+)
+
+// AzureClientID returns an attribute KeyValue conforming to the
+// "azure.client.id" semantic conventions. It represents the unique identifier of
+// the client instance.
+func AzureClientID(val string) attribute.KeyValue {
+ return AzureClientIDKey.String(val)
+}
+
+// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue
+// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic
+// conventions. It represents the list of regions contacted during operation in
+// the order that they were contacted. If there is more than one region listed,
+// it indicates that the operation was performed on multiple regions, i.e. a
+// cross-regional call.
+func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
+ return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val)
+}
+
+// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming
+// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It
+// represents the number of request units consumed by the operation.
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue {
+ return AzureCosmosDBOperationRequestChargeKey.Float64(val)
+}
+
+// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the
+// "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+// request payload size in bytes.
+func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue {
+ return AzureCosmosDBRequestBodySizeKey.Int(val)
+}
+
+// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to
+// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It
+// represents the cosmos DB sub status code.
+func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
+ return AzureCosmosDBResponseSubStatusCodeKey.Int(val)
+}
+
+// Enum values for azure.cosmosdb.connection.mode
+var (
+ // Gateway (HTTP) connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct")
+)
+
+// Enum values for azure.cosmosdb.consistency.level
+var (
+ // strong
+ // Stability: development
+ AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong")
+ // bounded_staleness
+ // Stability: development
+ AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness")
+ // session
+ // Stability: development
+ AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session")
+ // eventual
+ // Stability: development
+ AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual")
+ // consistent_prefix
+ // Stability: development
+ AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix")
+)
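+
+// Usage sketch (illustrative): the enum values above are ready-made
+// attribute.KeyValue pairs, so they can be mixed directly with the helper
+// functions; the span variable is assumed:
+//
+//	span.SetAttributes(
+//		AzureCosmosDBConnectionModeGateway,
+//		AzureCosmosDBConsistencyLevelSession,
+//		AzureCosmosDBOperationRequestCharge(46.18),
+//	)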
+
+// Namespace: browser
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.brands`).
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the "browser.language"
+ // semantic conventions. It represents the preferred language of the user using
+ // the browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "en", "en-US", "fr", "fr-FR"
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the browser is
+ // running on a mobile device.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
+ // left unset.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the "browser.platform"
+ // semantic conventions. It represents the platform on which the browser is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Windows", "macOS", "Android"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the
+ // [W3C User-Agent Client Hints specification]. Note that some (but not all) of
+ // these values can overlap with values in the
+ // [`os.type` and `os.name` attributes]. However, for consistency, the values in
+ // the `browser.platform` attribute should capture the exact value that the user
+ // agent provides.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform
+ // [`os.type` and `os.name` attributes]: ./os.md
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands"
+// semantic conventions. It represents the array of brand name and version
+// separated by a space.
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred language
+// of the user using the browser.
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile"
+// semantic conventions. It represents a boolean that is true if the browser is
+// running on a mobile device.
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
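+
+// Usage sketch (illustrative; values come from the attribute documentation
+// above and the span variable is assumed):
+//
+//	span.SetAttributes(
+//		BrowserBrands("Chromium 99", "Chrome 99"),
+//		BrowserLanguage("en-US"),
+//		BrowserMobile(false),
+//		BrowserPlatform("macOS"),
+//	)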
+
+// Namespace: cassandra
+const (
+ // CassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "cassandra.consistency.level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from [CQL].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+ CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+ // CassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "cassandra.coordinator.dc" semantic conventions. It represents the data
+ // center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: us-west-2
+ CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+ // CassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+ // coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+ CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+ // CassandraPageSizeKey is the attribute Key conforming to the
+ // "cassandra.page.size" semantic conventions. It represents the fetch size used
+ // for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 5000
+ CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+ // CassandraQueryIdempotentKey is the attribute Key conforming to the
+ // "cassandra.query.idempotent" semantic conventions. It represents the whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+ // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+ // "cassandra.speculative_execution.count" semantic conventions. It represents
+ // the number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 2
+ CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+ return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+ return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+ return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+ return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+ // all
+ // Stability: development
+ CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ // Stability: development
+ CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ // Stability: development
+ CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ // Stability: development
+ CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ // Stability: development
+ CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+ // two
+ // Stability: development
+ CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+ // three
+ // Stability: development
+ CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+ // local_one
+ // Stability: development
+ CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+ // any
+ // Stability: development
+ CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+ // serial
+ // Stability: development
+ CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ // Stability: development
+ CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
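+
+// Usage sketch (illustrative): annotating a CQL query span with the helpers and
+// enum values above; the span variable is assumed:
+//
+//	span.SetAttributes(
+//		CassandraConsistencyLevelLocalQuorum,
+//		CassandraCoordinatorDC("us-west-2"),
+//		CassandraPageSize(5000),
+//		CassandraQueryIdempotent(true),
+//	)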
+
+// Namespace: cicd
+const (
+ // CICDPipelineActionNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+ // action a pipeline run is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BUILD", "RUN", "SYNC"
+ CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+ // CICDPipelineNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.name" semantic conventions. It represents the human readable
+ // name of the pipeline within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Build and Test", "Lint", "Deploy Go Project",
+ // "deploy_to_environment"
+ CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+ // CICDPipelineResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.result" semantic conventions. It represents the result of a
+ // pipeline run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+ // CICDPipelineRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.id" semantic conventions. It represents the unique
+ // identifier of a pipeline run within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "120912"
+ CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+ // CICDPipelineRunStateKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline
+ // run goes through these states during its lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pending", "executing", "finalizing"
+ CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+ // CICDPipelineRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+ // the pipeline run, providing the complete address in order to locate and
+ // identify the pipeline run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+ // CICDPipelineTaskNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.name" semantic conventions. It represents the human
+ // readable name of a task within a pipeline. Task here most closely aligns with
+ // a [computing process] in a pipeline. Other terms for tasks include commands,
+ // steps, and procedures.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+ //
+ // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+ CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+ // CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+ // identifier of a task run within a pipeline.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "12097"
+ CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+ // CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.result" semantic conventions. It represents the
+ // result of a task run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+ // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+ // [URL] of the pipeline task run, providing the complete address in order to
+ // locate and identify the pipeline task run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full")
+
+ // CICDPipelineTaskTypeKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.type" semantic conventions. It represents the type of the
+ // task within a pipeline.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "build", "test", "deploy"
+ CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type")
+
+ // CICDSystemComponentKey is the attribute Key conforming to the
+ // "cicd.system.component" semantic conventions. It represents the name of a
+ // component of the CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "controller", "scheduler", "agent"
+ CICDSystemComponentKey = attribute.Key("cicd.system.component")
+
+ // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id"
+ // semantic conventions. It represents the unique identifier of a worker within
+ // a CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "abc123", "10.0.1.2", "controller"
+ CICDWorkerIDKey = attribute.Key("cicd.worker.id")
+
+ // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name"
+ // semantic conventions. It represents the name of a worker within a CICD
+ // system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "agent-abc", "controller", "Ubuntu LTS"
+ CICDWorkerNameKey = attribute.Key("cicd.worker.name")
+
+ // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state"
+ // semantic conventions. It represents the state of a CICD worker / agent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle", "busy", "down"
+ CICDWorkerStateKey = attribute.Key("cicd.worker.state")
+
+ // CICDWorkerURLFullKey is the attribute Key conforming to the
+ // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+ // worker, providing the complete address in order to locate and identify the
+ // worker.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://cicd.example.org/worker/abc123"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full")
+)
+
+// CICDPipelineName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.name" semantic conventions. It represents the human readable
+// name of the pipeline within a CI/CD system.
+func CICDPipelineName(val string) attribute.KeyValue {
+ return CICDPipelineNameKey.String(val)
+}
+
+// CICDPipelineRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+// identifier of a pipeline run within a CI/CD system.
+func CICDPipelineRunID(val string) attribute.KeyValue {
+ return CICDPipelineRunIDKey.String(val)
+}
+
+// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+// the pipeline run, providing the complete address in order to locate and
+// identify the pipeline run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineRunURLFullKey.String(val)
+}
+
+// CICDPipelineTaskName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.name" semantic conventions. It represents the human
+// readable name of a task within a pipeline. Task here most closely aligns with
+// a [computing process] in a pipeline. Other terms for tasks include commands,
+// steps, and procedures.
+//
+// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+func CICDPipelineTaskName(val string) attribute.KeyValue {
+ return CICDPipelineTaskNameKey.String(val)
+}
+
+// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+// identifier of a task run within a pipeline.
+func CICDPipelineTaskRunID(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunIDKey.String(val)
+}
+
+// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+// [URL] of the pipeline task run, providing the complete address in order to
+// locate and identify the pipeline task run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunURLFullKey.String(val)
+}
+
+// CICDSystemComponent returns an attribute KeyValue conforming to the
+// "cicd.system.component" semantic conventions. It represents the name of a
+// component of the CICD system.
+func CICDSystemComponent(val string) attribute.KeyValue {
+ return CICDSystemComponentKey.String(val)
+}
+
+// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id"
+// semantic conventions. It represents the unique identifier of a worker within a
+// CICD system.
+func CICDWorkerID(val string) attribute.KeyValue {
+ return CICDWorkerIDKey.String(val)
+}
+
+// CICDWorkerName returns an attribute KeyValue conforming to the
+// "cicd.worker.name" semantic conventions. It represents the name of a worker
+// within a CICD system.
+func CICDWorkerName(val string) attribute.KeyValue {
+ return CICDWorkerNameKey.String(val)
+}
+
+// CICDWorkerURLFull returns an attribute KeyValue conforming to the
+// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+// worker, providing the complete address in order to locate and identify the
+// worker.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDWorkerURLFull(val string) attribute.KeyValue {
+ return CICDWorkerURLFullKey.String(val)
+}
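+
+// Usage sketch (illustrative; the span variable is assumed and the pipeline
+// metadata values mirror the examples in the attribute documentation):
+//
+//	span.SetAttributes(
+//		CICDPipelineName("Build and Test"),
+//		CICDPipelineRunID("120912"),
+//		CICDPipelineTaskName("go-test"),
+//	)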
+
+// Enum values for cicd.pipeline.action.name
+var (
+ // The pipeline run is executing a build.
+ // Stability: development
+ CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD")
+ // The pipeline run is executing.
+ // Stability: development
+ CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN")
+ // The pipeline run is executing a sync.
+ // Stability: development
+ CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC")
+)
+
+// Enum values for cicd.pipeline.result
+var (
+ // The pipeline run finished successfully.
+ // Stability: development
+ CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
+ // The pipeline run did not finish successfully, e.g. due to a compile error
+ // or a failing test. Such failures are usually detected by non-zero exit
+ // codes of the tools executed in the pipeline run.
+ // Stability: development
+ CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+ // The pipeline run failed due to an error in the CICD system, e.g. due to
+ // the worker being killed.
+ // Stability: development
+ CICDPipelineResultError = CICDPipelineResultKey.String("error")
+ // A timeout caused the pipeline run to be interrupted.
+ // Stability: development
+ CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+ // The pipeline run was cancelled, e.g. by a user manually cancelling the
+ // pipeline run.
+ // Stability: development
+ CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+ // The pipeline run was skipped, e.g. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+ // The run pending state spans from the event triggering the pipeline run until
+ // the execution of the run starts (e.g. time spent in a queue, provisioning
+ // agents, creating run resources).
+ //
+ // Stability: development
+ CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+ // The executing state spans the execution of any run tasks (e.g. build, test).
+ // Stability: development
+ CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+ // The finalizing state spans from when the run has finished executing (e.g.
+ // cleanup of run resources).
+ // Stability: development
+ CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+ // The task run finished successfully.
+ // Stability: development
+ CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+ // The task run did not finish successfully, e.g. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+ // The task run failed due to an error in the CICD system, e.g. due to the
+ // worker being killed.
+ // Stability: development
+ CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+ // A timeout caused the task run to be interrupted.
+ // Stability: development
+ CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+ // The task run was cancelled, e.g. by a user manually cancelling the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+ // The task run was skipped, e.g. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+ // build
+ // Stability: development
+ CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+ // test
+ // Stability: development
+ CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+ // deploy
+ // Stability: development
+ CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
+
+// Enum values for cicd.worker.state
+var (
+ // The worker is not performing work for the CICD system. It is available to
+ // the CICD system to perform work (online / idle).
+ // Stability: development
+ CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+ // The worker is performing work for the CICD system.
+ // Stability: development
+ CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+ // The worker is not available to the CICD system (disconnected / down).
+ // Stability: development
+ CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
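+// Usage sketch (editorial addition, not generated content): the CICD
+// constructors and enum values above all produce plain attribute.KeyValue
+// values, so they can be attached to a span at task boundaries. The semconv
+// import version, tracer name, and attribute values below are illustrative
+// assumptions, not prescribed by these conventions:
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // adjust to the vendored version
+//		"go.opentelemetry.io/otel/trace"
+//	)
+//
+//	func traceTaskRun(ctx context.Context) {
+//		_, span := otel.Tracer("ci").Start(ctx, "pipeline task run",
+//			trace.WithAttributes(
+//				semconv.CICDPipelineTaskName("build"),
+//				semconv.CICDPipelineTaskRunID("run-42"),
+//				semconv.CICDPipelineTaskRunResultSuccess, // enum value, already a KeyValue
+//			))
+//		defer span.End()
+//	}
+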
+// Namespace: client
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.address` SHOULD represent the client address behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port" semantic
+ // conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.port` SHOULD represent the client port behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
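+// Usage sketch (editorial addition): server-side instrumentation can derive
+// client.address and client.port from the peer address of an incoming HTTP
+// request. This minimal version reads http.Request.RemoteAddr and therefore
+// reports the direct peer; per the notes above, the client address behind any
+// intermediaries SHOULD be preferred when it is available. The helper name and
+// semconv version are assumptions:
+//
+//	import (
+//		"net"
+//		"net/http"
+//		"strconv"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+//	)
+//
+//	func clientAttrs(r *http.Request) []attribute.KeyValue {
+//		host, portStr, err := net.SplitHostPort(r.RemoteAddr)
+//		if err != nil {
+//			return nil // e.g. a Unix domain socket peer without host:port form
+//		}
+//		attrs := []attribute.KeyValue{semconv.ClientAddress(host)}
+//		if port, err := strconv.Atoi(portStr); err == nil {
+//			attrs = append(attrs, semconv.ClientPort(port))
+//		}
+//		return attrs
+//	}
+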
+// Namespace: cloud
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+ // semantic conventions. It represents the cloud account ID the resource is
+ // assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "111111111111", "opentelemetry"
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the
+ // availability zone where the resource is running. Cloud regions often have
+ // multiple, isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-east-1c"
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+ // conventions. It represents the geographical region within a cloud provider.
+ // When associated with a resource, this attribute specifies the region where
+ // the resource operates. When calling services or APIs deployed on a cloud,
+ // this attribute identifies the region where the called destination is
+ // deployed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1", "us-east-1"
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions], [AWS regions], [Azure regions],
+ // [Google Cloud regions], or [Tencent Cloud regions].
+ //
+ // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+ // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+ // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+ // [Google Cloud regions]: https://cloud.google.com/about/locations
+ // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+ // semantic conventions. It represents the cloud provider-specific native
+ // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+ // [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+ // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+ // "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+ // Note: On some cloud providers, it may not be possible to determine the full
+ // ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud provider.
+ // The following well-known definitions MUST be used if you set this attribute
+ // and they apply:
+ //
+ // - **AWS Lambda:** The function [ARN].
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix]
+ // with the resolved function version, as the same runtime instance may be
+ // invocable with
+ // multiple different aliases.
+ // - **GCP:** The [URI of the resource]
+ // - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+ // *not* the function app, having the form
+ //   `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function app
+ // can host multiple functions that would usually share
+ // a TracerProvider.
+ //
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
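+// Usage sketch (editorial addition): the cloud.resource_id note above requires
+// that an AWS Lambda "invoked ARN" carrying an alias qualifier be rewritten to
+// use the resolved function version. A minimal string-level sketch, assuming a
+// qualified ARN of the form arn:aws:lambda:region:account:function:name[:qualifier];
+// the resolved version would typically come from the Lambda environment (e.g.
+// AWS_LAMBDA_FUNCTION_VERSION):
+//
+//	import (
+//		"strings"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+//	)
+//
+//	func lambdaResourceID(invokedARN, resolvedVersion string) attribute.KeyValue {
+//		parts := strings.Split(invokedARN, ":")
+//		if len(parts) == 8 {
+//			parts[7] = resolvedVersion // replace the alias qualifier
+//		} else {
+//			parts = append(parts, resolvedVersion) // unqualified invoked ARN
+//		}
+//		return semconv.CloudResourceID(strings.Join(parts, ":"))
+//	}
+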
+// Enum values for cloud.platform
+var (
+ // Alibaba Cloud Elastic Compute Service
+ // Stability: development
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ // Stability: development
+ CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ // Stability: development
+ CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ // Stability: development
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ // Stability: development
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ // Stability: development
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ // Stability: development
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ // Stability: development
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ // Stability: development
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ // Stability: development
+ CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ // Stability: development
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Apps
+ // Stability: development
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+ // Azure Container Instances
+ // Stability: development
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ // Stability: development
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ // Stability: development
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ // Stability: development
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ // Stability: development
+ CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ // Stability: development
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ // Stability: development
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ // Stability: development
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ // Stability: development
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ // Stability: development
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ // Stability: development
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ // Stability: development
+ CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ // Stability: development
+ CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Compute on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+ // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ // Stability: development
+ CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ // Stability: development
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ // Stability: development
+ CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ // Stability: development
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ // Stability: development
+ CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+ // Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+ // Tencent Cloud
+ // Stability: development
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
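+// Usage sketch (editorial addition): per the cloud.platform note above, the
+// platform value's prefix should agree with cloud.provider. The region and
+// account values below are illustrative; in practice resource detectors
+// populate these:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.CloudProviderAWS,       // "aws"
+//		semconv.CloudPlatformAWSLambda, // "aws_lambda" — prefix matches the provider
+//		semconv.CloudRegion("us-east-1"),
+//		semconv.CloudAccountID("111111111111"),
+//	}
+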
+// Namespace: cloudevents
+const (
+ // CloudEventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the [event_id],
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+ //
+ // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+ CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudEventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source], which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+ // "my-service"
+ //
+ // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+ CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents specification] which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ //
+ // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+ CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudEventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the [subject]
+ // of the event in the context of the event producer (identified by source).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: mynewfile.jpg
+ //
+ // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+ CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudEventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type], which contains a value describing the type of event related
+ // to the originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+ //
+ // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+ CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id]
+// uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+ return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source]
+// identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+ return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+ return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the [event_type]
+// contains a value describing the type of event related to the originating
+// occurrence.
+//
+// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+func CloudEventsEventType(val string) attribute.KeyValue {
+ return CloudEventsEventTypeKey.String(val)
+}
+
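+// Usage sketch (editorial addition): when instrumenting a CloudEvents producer
+// or consumer, the five attributes above map directly onto the event's context
+// attributes. The event struct here is a hypothetical stand-in for whatever
+// SDK type carries the event:
+//
+//	type cloudEvent struct { // hypothetical
+//		ID, Source, SpecVersion, Type, Subject string
+//	}
+//
+//	func eventAttrs(e cloudEvent) []attribute.KeyValue {
+//		return []attribute.KeyValue{
+//			semconv.CloudEventsEventID(e.ID),
+//			semconv.CloudEventsEventSource(e.Source),
+//			semconv.CloudEventsEventSpecVersion(e.SpecVersion),
+//			semconv.CloudEventsEventType(e.Type),
+//			semconv.CloudEventsEventSubject(e.Subject),
+//		}
+//	}
+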
+// Namespace: cloudfoundry
+const (
+ // CloudFoundryAppIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_id`. This is the same value as
+ // reported by `cf app --guid`.
+ CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
+
+ // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+ // of the application instance. 0 when just one instance is active.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0", "1"
+ // Note: CloudFoundry defines the `instance_id` in the
+ // [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the application instance index for applications
+ // deployed on the runtime.
+ //
+ // Application instrumentation should use the value from environment
+ // variable `CF_INSTANCE_INDEX`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
+
+ // CloudFoundryAppNameKey is the attribute Key conforming to the
+ // "cloudfoundry.app.name" semantic conventions. It represents the name of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-app-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_name`. This is the same value
+ // as reported by `cf apps`.
+ CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
+
+ // CloudFoundryOrgIDKey is the attribute Key conforming to the
+ // "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+ // CloudFoundry org the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_id`. This is the same value as
+ // reported by `cf org --guid`.
+ CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id")
+
+ // CloudFoundryOrgNameKey is the attribute Key conforming to the
+ // "cloudfoundry.org.name" semantic conventions. It represents the name of the
+ // CloudFoundry organization the app is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_name`. This is the same value as
+ // reported by `cf orgs`.
+ CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name")
+
+ // CloudFoundryProcessIDKey is the attribute Key conforming to the
+ // "cloudfoundry.process.id" semantic conventions. It represents the UID
+ // identifying the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to
+ // `VCAP_APPLICATION.app_id` for applications deployed to the runtime.
+ // For system components, this could be the actual PID.
+ CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id")
+
+ // CloudFoundryProcessTypeKey is the attribute Key conforming to the
+ // "cloudfoundry.process.type" semantic conventions. It represents the type of
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "web"
+ // Note: CloudFoundry applications can consist of multiple jobs. Usually the
+ // main process will be of type `web`. There can be additional background
+ // tasks or side-cars with different process types.
+ CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type")
+
+ // CloudFoundrySpaceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_id`. This is the same value as
+ // reported by `cf space --guid`.
+ CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id")
+
+ // CloudFoundrySpaceNameKey is the attribute Key conforming to the
+ // "cloudfoundry.space.name" semantic conventions. It represents the name of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-space-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_name`. This is the same value as
+ // reported by `cf spaces`.
+ CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name")
+
+ // CloudFoundrySystemIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.id" semantic conventions. It represents a guid or
+ // another name describing the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cf/gorouter"
+ // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the component name, e.g. "gorouter", for
+ // CloudFoundry components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.id` should be set to
+ // `spec.deployment/spec.name`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id")
+
+ // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+ // describing the concrete instance of the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: CloudFoundry defines the `instance_id` in the
+ // [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the vm id for CloudFoundry components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.instance.id` should be set to `spec.id`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id")
+)
+
+// CloudFoundryAppID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+// application.
+func CloudFoundryAppID(val string) attribute.KeyValue {
+ return CloudFoundryAppIDKey.String(val)
+}
+
+// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+// of the application instance. 0 when just one instance is active.
+func CloudFoundryAppInstanceID(val string) attribute.KeyValue {
+ return CloudFoundryAppInstanceIDKey.String(val)
+}
+
+// CloudFoundryAppName returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+// application.
+func CloudFoundryAppName(val string) attribute.KeyValue {
+ return CloudFoundryAppNameKey.String(val)
+}
+
+// CloudFoundryOrgID returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+// CloudFoundry org the application is running in.
+func CloudFoundryOrgID(val string) attribute.KeyValue {
+ return CloudFoundryOrgIDKey.String(val)
+}
+
+// CloudFoundryOrgName returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+// CloudFoundry organization the app is running in.
+func CloudFoundryOrgName(val string) attribute.KeyValue {
+ return CloudFoundryOrgNameKey.String(val)
+}
+
+// CloudFoundryProcessID returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.id" semantic conventions. It represents the UID
+// identifying the process.
+func CloudFoundryProcessID(val string) attribute.KeyValue {
+ return CloudFoundryProcessIDKey.String(val)
+}
+
+// CloudFoundryProcessType returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.type" semantic conventions. It represents the type of
+// process.
+func CloudFoundryProcessType(val string) attribute.KeyValue {
+ return CloudFoundryProcessTypeKey.String(val)
+}
+
+// CloudFoundrySpaceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceID(val string) attribute.KeyValue {
+ return CloudFoundrySpaceIDKey.String(val)
+}
+
+// CloudFoundrySpaceName returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+ return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+ return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+ return CloudFoundrySystemInstanceIDKey.String(val)
+}
+
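+// Usage sketch (editorial addition): most of the notes above point at fields
+// of the VCAP_APPLICATION environment variable. A minimal sketch decoding the
+// fields named in those notes; the struct and error handling are illustrative,
+// and the JSON keys should be verified against your platform's actual
+// VCAP_APPLICATION schema:
+//
+//	import (
+//		"encoding/json"
+//		"os"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+//	)
+//
+//	func cloudFoundryAttrs() []attribute.KeyValue {
+//		var vcap struct {
+//			ApplicationID   string `json:"application_id"`
+//			ApplicationName string `json:"application_name"`
+//			SpaceID         string `json:"space_id"`
+//			SpaceName       string `json:"space_name"`
+//		}
+//		if err := json.Unmarshal([]byte(os.Getenv("VCAP_APPLICATION")), &vcap); err != nil {
+//			return nil
+//		}
+//		return []attribute.KeyValue{
+//			semconv.CloudFoundryAppID(vcap.ApplicationID),
+//			semconv.CloudFoundryAppName(vcap.ApplicationName),
+//			semconv.CloudFoundrySpaceID(vcap.SpaceID),
+//			semconv.CloudFoundrySpaceName(vcap.SpaceName),
+//			semconv.CloudFoundryAppInstanceID(os.Getenv("CF_INSTANCE_INDEX")),
+//		}
+//	}
+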
+// Namespace: code
+const (
+ // CodeColumnNumberKey is the attribute Key conforming to the
+ // "code.column.number" semantic conventions. It represents the column number in
+ // `code.file.path` best representing the operation. It SHOULD point within the
+ // code unit named in `code.function.name`. This attribute MUST NOT be used on
+ // the Profile signal since the data is already captured in 'message Line'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeColumnNumberKey = attribute.Key("code.column.number")
+
+ // CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+ // semantic conventions. It represents the source code file name that identifies
+ // the code unit as uniquely as possible (preferably an absolute file path).
+ // This attribute MUST NOT be used on the Profile signal since the data is
+ // already captured in 'message Function'. This constraint is imposed to prevent
+ // redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: /usr/local/MyApplication/content_root/app/index.php
+ CodeFilePathKey = attribute.Key("code.file.path")
+
+ // CodeFunctionNameKey is the attribute Key conforming to the
+ // "code.function.name" semantic conventions. It represents the method or
+ // function fully-qualified name without arguments. The value should fit the
+ // natural representation of the language runtime, which is also likely the same
+ // used within `code.stacktrace` attribute value. This attribute MUST NOT be
+ // used on the Profile signal since the data is already captured in 'message
+ // Function'. This constraint is imposed to prevent redundancy and maintain data
+ // integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "com.example.MyHttpService.serveRequest",
+ // "GuzzleHttp\Client::transfer", "fopen"
+ // Note: Values and format depends on each language runtime, thus it is
+ // impossible to provide an exhaustive list of examples.
+ // The values are usually the same (or prefixes of) the ones found in native
+ // stack trace representation stored in
+ // `code.stacktrace` without information on arguments.
+ //
+ // Examples:
+ //
+ // - Java method: `com.example.MyHttpService.serveRequest`
+ // - Java anonymous class method: `com.mycompany.Main$1.myMethod`
+ // - Java lambda method:
+ // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod`
+ // - PHP function: `GuzzleHttp\Client::transfer`
+ // - Go function: `github.com/my/repo/pkg.foo.func5`
+ // - Elixir: `OpenTelemetry.Ctx.new`
+ // - Erlang: `opentelemetry_ctx:new`
+ // - Rust: `playground::my_module::my_cool_func`
+ // - C function: `fopen`
+ CodeFunctionNameKey = attribute.Key("code.function.name")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.line.number"
+ // semantic conventions. It represents the line number in `code.file.path` best
+ // representing the operation. It SHOULD point within the code unit named in
+ // `code.function.name`. This attribute MUST NOT be used on the Profile signal
+ // since the data is already captured in 'message Line'. This constraint is
+ // imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeLineNumberKey = attribute.Key("code.line.number")
+
+ // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace"
+ // semantic conventions. It represents a stacktrace as a string in the natural
+ // representation for the language runtime. The representation is identical to
+ // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile
+ // signal since the data is already captured in 'message Location'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ //
+ // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumnNumber returns an attribute KeyValue conforming to the
+// "code.column.number" semantic conventions. It represents the column number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeColumnNumber(val int) attribute.KeyValue {
+ return CodeColumnNumberKey.Int(val)
+}
+
+// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path"
+// semantic conventions. It represents the source code file name that identifies
+// the code unit as uniquely as possible (preferably an absolute file path). This
+// attribute MUST NOT be used on the Profile signal since the data is already
+// captured in 'message Function'. This constraint is imposed to prevent
+// redundancy and maintain data integrity.
+func CodeFilePath(val string) attribute.KeyValue {
+ return CodeFilePathKey.String(val)
+}
+
+// CodeFunctionName returns an attribute KeyValue conforming to the
+// "code.function.name" semantic conventions. It represents the method or
+// function fully-qualified name without arguments. The value should fit the
+// natural representation of the language runtime, which is also likely the same
+// used within `code.stacktrace` attribute value. This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
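+// Usage sketch (editorial addition): on spans (not on the Profile signal, per
+// the MUST NOT notes above), the code.* attributes can be filled in from the
+// Go runtime. The helper name is an assumption:
+//
+//	import (
+//		"runtime"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+//	)
+//
+//	func codeAttrs() []attribute.KeyValue {
+//		pc, file, line, ok := runtime.Caller(1) // describe the caller of this helper
+//		if !ok {
+//			return nil
+//		}
+//		return []attribute.KeyValue{
+//			semconv.CodeFunctionName(runtime.FuncForPC(pc).Name()),
+//			semconv.CodeFilePath(file),
+//			semconv.CodeLineNumber(line),
+//		}
+//	}
+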
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm name followed by a hash digest.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+ // K8s defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container runtime
+ // managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm name followed by a hash digest.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container runtime
+// managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
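+// Usage sketch (editorial addition): container attributes are usually set once
+// as resource attributes at startup. All values below are illustrative; in
+// practice they come from the container runtime or an orchestrator API:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.ContainerID("a3bf90e006b2"),
+//		semconv.ContainerName("opentelemetry-autoconf"),
+//		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
+//		semconv.ContainerImageTags("v1.27.1"), // variadic, accepts multiple tags
+//		semconv.ContainerRuntime("containerd"),
+//	}
+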
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ CPULogicalNumberKey = attribute.Key("cpu.logical_number")
+
+ // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic
+ // conventions. It represents the mode of the CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "user", "system"
+ CPUModeKey = attribute.Key("cpu.mode")
+)
+
+// CPULogicalNumber returns an attribute KeyValue conforming to the
+// "cpu.logical_number" semantic conventions. It represents the logical CPU
+// number [0..n-1].
+func CPULogicalNumber(val int) attribute.KeyValue {
+ return CPULogicalNumberKey.Int(val)
+}
+
+// Enum values for cpu.mode
+var (
+ // user
+ // Stability: development
+ CPUModeUser = CPUModeKey.String("user")
+ // system
+ // Stability: development
+ CPUModeSystem = CPUModeKey.String("system")
+ // nice
+ // Stability: development
+ CPUModeNice = CPUModeKey.String("nice")
+ // idle
+ // Stability: development
+ CPUModeIdle = CPUModeKey.String("idle")
+ // iowait
+ // Stability: development
+ CPUModeIOWait = CPUModeKey.String("iowait")
+ // interrupt
+ // Stability: development
+ CPUModeInterrupt = CPUModeKey.String("interrupt")
+ // steal
+ // Stability: development
+ CPUModeSteal = CPUModeKey.String("steal")
+ // kernel
+ // Stability: development
+ CPUModeKernel = CPUModeKey.String("kernel")
+)
+
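+// Usage sketch (editorial addition): cpu.mode and cpu.logical_number are
+// typically metric attributes, e.g. on a per-CPU time counter. The instrument
+// name, meter name, and values are illustrative assumptions:
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel"
+//		"go.opentelemetry.io/otel/metric"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+//	)
+//
+//	func recordCPUTime(ctx context.Context, cpu int, userSeconds float64) error {
+//		// In real code, create the instrument once and reuse it.
+//		counter, err := otel.Meter("host").Float64Counter("system.cpu.time",
+//			metric.WithUnit("s"))
+//		if err != nil {
+//			return err
+//		}
+//		counter.Add(ctx, userSeconds, metric.WithAttributes(
+//			semconv.CPULogicalNumber(cpu),
+//			semconv.CPUModeUser, // enum value defined above
+//		))
+//		return nil
+//	}
+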
+// Namespace: db
+const (
+ // DBClientConnectionPoolNameKey is the attribute Key conforming to the
+ // "db.client.connection.pool.name" semantic conventions. It represents the name
+ // of the connection pool; unique within the instrumented application. In case
+ // the connection pool implementation doesn't provide a name, instrumentation
+ // SHOULD use a combination of parameters that would make the name unique, for
+ // example, combining attributes `server.address`, `server.port`, and
+ // `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+ // Instrumentations that generate connection pool names following different
+ // patterns SHOULD document it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myDataSource"
+ DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name")
+
+ // DBClientConnectionStateKey is the attribute Key conforming to the
+ // "db.client.connection.state" semantic conventions. It represents the state of
+ // a connection in the pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle"
+ DBClientConnectionStateKey = attribute.Key("db.client.connection.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "public.users", "customers"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The collection name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple collections
+ // in non-batch operations.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // collection name then that collection name SHOULD be used.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic
+ // conventions. It represents the name of the database, fully qualified within
+ // the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "customers", "test.users"
+ // Note: If a database system has multiple namespace components, they SHOULD be
+ // concatenated from the most general to the most specific namespace component,
+ // using `|` as a separator between the components. Any missing components (and
+ // their associated separators) SHOULD be omitted.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application without
+ // attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationBatchSizeKey is the attribute Key conforming to the
+ // "db.operation.batch.size" semantic conventions. It represents the number of
+ // queries included in a batch operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 2, 3, 4
+ // Note: Operations are only considered batches when they contain two or more
+ // operations, and so `db.operation.batch.size` SHOULD never be `1`.
+ DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size")
+
+ // DBOperationNameKey is the attribute Key conforming to the "db.operation.name"
+ // semantic conventions. It represents the name of the operation or command
+ // being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "findAndModify", "HMSET", "SELECT"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The operation name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple operations
+ // in non-batch operations.
+ //
+ // If spaces can occur in the operation name, multiple consecutive spaces
+ // SHOULD be normalized to a single space.
+ //
+ // For batch operations, if the individual operations are known to have the
+ // same operation name, then that operation name SHOULD be used, prepended by
+ // `BATCH `; otherwise `db.operation.name` SHOULD be `BATCH` or some other
+ // database system specific term if more applicable.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary"
+ // semantic conventions. It represents the low cardinality summary of a database
+ // query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get
+ // user by id"
+ // Note: The query summary describes a class of database queries and is useful
+ // as a grouping key, especially when analyzing telemetry for database
+ // calls involving complex queries.
+ //
+ // A summary may be available to the instrumentation through instrumentation
+ // hooks or other means. If it is not available, instrumentations that support
+ // query parsing SHOULD generate a summary following the
+ // [Generating query summary] section.
+ //
+ // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query
+ DBQuerySummaryKey = attribute.Key("db.query.summary")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?"
+ // Note: For sanitization see [Sanitization of `db.query.text`].
+ // For batch operations, if the individual operations are known to have the same
+ // query text then that query text SHOULD be used, otherwise all of the
+ // individual query texts SHOULD be concatenated with separator `; ` or some
+ // other database system specific separator if more applicable.
+ // Parameterized query text SHOULD NOT be sanitized. Even though parameterized
+ // query text can potentially have sensitive data, by using a parameterized
+ // query the user is giving a strong signal that any sensitive data will be
+ // passed as parameter values, and the benefit to observability of capturing the
+ // static part of the query text by default outweighs the risk.
+ //
+ // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBResponseReturnedRowsKey is the attribute Key conforming to the
+ // "db.response.returned_rows" semantic conventions. It represents the number of
+ // rows returned by the operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10, 30, 1000
+ DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows")
+
+ // DBResponseStatusCodeKey is the attribute Key conforming to the
+ // "db.response.status_code" semantic conventions. It represents the database
+ // response status code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "102", "ORA-17002", "08P01", "404"
+ // Note: The status code returned by the database. Usually it represents an
+ // error code, but may also represent partial success, warning, or differentiate
+ // between various types of successful outcomes.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.response.status_code` means in the context of that system.
+ DBResponseStatusCodeKey = attribute.Key("db.response.status_code")
+
+ // DBStoredProcedureNameKey is the attribute Key conforming to the
+ // "db.stored_procedure.name" semantic conventions. It represents the name of a
+ // stored procedure within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GetCustomer"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // stored procedure name then that stored procedure name SHOULD be used.
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name")
+
+ // DBSystemNameKey is the attribute Key conforming to the "db.system.name"
+ // semantic conventions. It represents the database management system (DBMS)
+ // product as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ // Note: The actual DBMS may differ from the one identified by the client. For
+ // example, when using PostgreSQL client libraries to connect to a CockroachDB,
+ // the `db.system.name` is set to `postgresql` based on the instrumentation's
+ // best knowledge.
+ DBSystemNameKey = attribute.Key("db.system.name")
+)
+
+// DBClientConnectionPoolName returns an attribute KeyValue conforming to the
+// "db.client.connection.pool.name" semantic conventions. It represents the name
+// of the connection pool; unique within the instrumented application. In case
+// the connection pool implementation doesn't provide a name, instrumentation
+// SHOULD use a combination of parameters that would make the name unique, for
+// example, combining attributes `server.address`, `server.port`, and
+// `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+// Instrumentations that generate connection pool name following different
+// patterns SHOULD document it.
+func DBClientConnectionPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionPoolNameKey.String(val)
+}
+
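+ // Where the pool implementation provides no name, a synthesized name following
+ // the convention above might look like this (a sketch; `addr`, `port`, and
+ // `ns` are assumed to come from the pool configuration, and `fmt` to be
+ // imported):
+ //
+ //     poolName := fmt.Sprintf("%s:%d/%s", addr, port, ns)
+ //     attr := DBClientConnectionPoolName(poolName)
+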
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the "db.namespace"
+// semantic conventions. It represents the name of the database, fully qualified
+// within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
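+ // For example, for a database with catalog and schema namespace components,
+ // the components are joined from most general to most specific with `|`
+ // (a sketch with illustrative values):
+ //
+ //     attr := DBNamespace("customers|public")
+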
+// DBOperationBatchSize returns an attribute KeyValue conforming to the
+// "db.operation.batch.size" semantic conventions. It represents the number of
+// queries included in a batch operation.
+func DBOperationBatchSize(val int) attribute.KeyValue {
+ return DBOperationBatchSizeKey.Int(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
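+ // For example, a batch of three INSERT statements sharing the same operation
+ // name might be recorded as follows (a sketch):
+ //
+ //     attrs := []attribute.KeyValue{
+ //         DBOperationName("BATCH INSERT"),
+ //         DBOperationBatchSize(3), // batches always contain two or more operations
+ //     }
+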
+// DBQuerySummary returns an attribute KeyValue conforming to the
+// "db.query.summary" semantic conventions. It represents the low cardinality
+// summary of a database query.
+func DBQuerySummary(val string) attribute.KeyValue {
+ return DBQuerySummaryKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the "db.query.text"
+// semantic conventions. It represents the database query being executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
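+ // For example, a batch whose individual query texts differ concatenates them
+ // with `; ` (a sketch with illustrative SQL):
+ //
+ //     attr := DBQueryText("INSERT INTO orders (id) VALUES (?); UPDATE orders SET status = ? WHERE id = ?")
+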
+// DBResponseReturnedRows returns an attribute KeyValue conforming to the
+// "db.response.returned_rows" semantic conventions. It represents the number of
+// rows returned by the operation.
+func DBResponseReturnedRows(val int) attribute.KeyValue {
+ return DBResponseReturnedRowsKey.Int(val)
+}
+
+// DBResponseStatusCode returns an attribute KeyValue conforming to the
+// "db.response.status_code" semantic conventions. It represents the database
+// response status code.
+func DBResponseStatusCode(val string) attribute.KeyValue {
+ return DBResponseStatusCodeKey.String(val)
+}
+
+// DBStoredProcedureName returns an attribute KeyValue conforming to the
+// "db.stored_procedure.name" semantic conventions. It represents the name of a
+// stored procedure within the database.
+func DBStoredProcedureName(val string) attribute.KeyValue {
+ return DBStoredProcedureNameKey.String(val)
+}
+
+// Enum values for db.client.connection.state
+var (
+ // idle
+ // Stability: development
+ DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle")
+ // used
+ // Stability: development
+ DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used")
+)
+
+// Enum values for db.system.name
+var (
+ // Some other SQL database. Fallback only.
+ // Stability: development
+ DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql")
+ // [Adabas (Adaptable Database System)]
+ // Stability: development
+ //
+ // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas
+ DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas")
+ // [Actian Ingres]
+ // Stability: development
+ //
+ // [Actian Ingres]: https://www.actian.com/databases/ingres/
+ DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres")
+ // [Amazon DynamoDB]
+ // Stability: development
+ //
+ // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/
+ DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb")
+ // [Amazon Redshift]
+ // Stability: development
+ //
+ // [Amazon Redshift]: https://aws.amazon.com/redshift/
+ DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift")
+ // [Azure Cosmos DB]
+ // Stability: development
+ //
+ // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db
+ DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb")
+ // [InterSystems Caché]
+ // Stability: development
+ //
+ // [InterSystems Caché]: https://www.intersystems.com/products/cache/
+ DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache")
+ // [Apache Cassandra]
+ // Stability: development
+ //
+ // [Apache Cassandra]: https://cassandra.apache.org/
+ DBSystemNameCassandra = DBSystemNameKey.String("cassandra")
+ // [ClickHouse]
+ // Stability: development
+ //
+ // [ClickHouse]: https://clickhouse.com/
+ DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse")
+ // [CockroachDB]
+ // Stability: development
+ //
+ // [CockroachDB]: https://www.cockroachlabs.com/
+ DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb")
+ // [Couchbase]
+ // Stability: development
+ //
+ // [Couchbase]: https://www.couchbase.com/
+ DBSystemNameCouchbase = DBSystemNameKey.String("couchbase")
+ // [Apache CouchDB]
+ // Stability: development
+ //
+ // [Apache CouchDB]: https://couchdb.apache.org/
+ DBSystemNameCouchDB = DBSystemNameKey.String("couchdb")
+ // [Apache Derby]
+ // Stability: development
+ //
+ // [Apache Derby]: https://db.apache.org/derby/
+ DBSystemNameDerby = DBSystemNameKey.String("derby")
+ // [Elasticsearch]
+ // Stability: development
+ //
+ // [Elasticsearch]: https://www.elastic.co/elasticsearch
+ DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch")
+ // [Firebird]
+ // Stability: development
+ //
+ // [Firebird]: https://www.firebirdsql.org/
+ DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql")
+ // [Google Cloud Spanner]
+ // Stability: development
+ //
+ // [Google Cloud Spanner]: https://cloud.google.com/spanner
+ DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner")
+ // [Apache Geode]
+ // Stability: development
+ //
+ // [Apache Geode]: https://geode.apache.org/
+ DBSystemNameGeode = DBSystemNameKey.String("geode")
+ // [H2 Database]
+ // Stability: development
+ //
+ // [H2 Database]: https://h2database.com/
+ DBSystemNameH2database = DBSystemNameKey.String("h2database")
+ // [Apache HBase]
+ // Stability: development
+ //
+ // [Apache HBase]: https://hbase.apache.org/
+ DBSystemNameHBase = DBSystemNameKey.String("hbase")
+ // [Apache Hive]
+ // Stability: development
+ //
+ // [Apache Hive]: https://hive.apache.org/
+ DBSystemNameHive = DBSystemNameKey.String("hive")
+ // [HyperSQL Database]
+ // Stability: development
+ //
+ // [HyperSQL Database]: https://hsqldb.org/
+ DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb")
+ // [IBM Db2]
+ // Stability: development
+ //
+ // [IBM Db2]: https://www.ibm.com/db2
+ DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2")
+ // [IBM Informix]
+ // Stability: development
+ //
+ // [IBM Informix]: https://www.ibm.com/products/informix
+ DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix")
+ // [IBM Netezza]
+ // Stability: development
+ //
+ // [IBM Netezza]: https://www.ibm.com/products/netezza
+ DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza")
+ // [InfluxDB]
+ // Stability: development
+ //
+ // [InfluxDB]: https://www.influxdata.com/
+ DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb")
+ // [Instant]
+ // Stability: development
+ //
+ // [Instant]: https://www.instantdb.com/
+ DBSystemNameInstantDB = DBSystemNameKey.String("instantdb")
+ // [MariaDB]
+ // Stability: stable
+ //
+ // [MariaDB]: https://mariadb.org/
+ DBSystemNameMariaDB = DBSystemNameKey.String("mariadb")
+ // [Memcached]
+ // Stability: development
+ //
+ // [Memcached]: https://memcached.org/
+ DBSystemNameMemcached = DBSystemNameKey.String("memcached")
+ // [MongoDB]
+ // Stability: development
+ //
+ // [MongoDB]: https://www.mongodb.com/
+ DBSystemNameMongoDB = DBSystemNameKey.String("mongodb")
+ // [Microsoft SQL Server]
+ // Stability: stable
+ //
+ // [Microsoft SQL Server]: https://www.microsoft.com/sql-server
+ DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server")
+ // [MySQL]
+ // Stability: stable
+ //
+ // [MySQL]: https://www.mysql.com/
+ DBSystemNameMySQL = DBSystemNameKey.String("mysql")
+ // [Neo4j]
+ // Stability: development
+ //
+ // [Neo4j]: https://neo4j.com/
+ DBSystemNameNeo4j = DBSystemNameKey.String("neo4j")
+ // [OpenSearch]
+ // Stability: development
+ //
+ // [OpenSearch]: https://opensearch.org/
+ DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch")
+ // [Oracle Database]
+ // Stability: development
+ //
+ // [Oracle Database]: https://www.oracle.com/database/
+ DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db")
+ // [PostgreSQL]
+ // Stability: stable
+ //
+ // [PostgreSQL]: https://www.postgresql.org/
+ DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql")
+ // [Redis]
+ // Stability: development
+ //
+ // [Redis]: https://redis.io/
+ DBSystemNameRedis = DBSystemNameKey.String("redis")
+ // [SAP HANA]
+ // Stability: development
+ //
+ // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html
+ DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana")
+ // [SAP MaxDB]
+ // Stability: development
+ //
+ // [SAP MaxDB]: https://maxdb.sap.com/
+ DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb")
+ // [SQLite]
+ // Stability: development
+ //
+ // [SQLite]: https://www.sqlite.org/
+ DBSystemNameSQLite = DBSystemNameKey.String("sqlite")
+ // [Teradata]
+ // Stability: development
+ //
+ // [Teradata]: https://www.teradata.com/
+ DBSystemNameTeradata = DBSystemNameKey.String("teradata")
+ // [Trino]
+ // Stability: development
+ //
+ // [Trino]: https://trino.io/
+ DBSystemNameTrino = DBSystemNameKey.String("trino")
+)
+
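+ // For example, per the note on `db.system.name`, an instrumentation built on a
+ // PostgreSQL client library sets the attribute from its own best knowledge,
+ // even when the server is actually CockroachDB (a sketch):
+ //
+ //     attr := DBSystemNamePostgreSQL // not DBSystemNameCockroachDB
+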
+// Namespace: deployment
+const (
+ // DeploymentEnvironmentNameKey is the attribute Key conforming to the
+ // "deployment.environment.name" semantic conventions. It represents the name of
+ // the [deployment environment] (aka deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "staging", "production"
+ // Note: `deployment.environment.name` does not affect the uniqueness
+ // constraints defined through the `service.namespace`, `service.name` and
+ // `service.instance.id` resource attributes.
+ // This implies that resources carrying the following attribute combinations
+ // MUST be considered to be identifying the same service:
+ //
+ // - `service.name=frontend`, `deployment.environment.name=production`
+ // - `service.name=frontend`, `deployment.environment.name=staging`.
+ //
+ //
+ // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+ DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name")
+
+ // DeploymentIDKey is the attribute Key conforming to the "deployment.id"
+ // semantic conventions. It represents the id of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1208"
+ DeploymentIDKey = attribute.Key("deployment.id")
+
+ // DeploymentNameKey is the attribute Key conforming to the "deployment.name"
+ // semantic conventions. It represents the name of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "deploy my app", "deploy-frontend"
+ DeploymentNameKey = attribute.Key("deployment.name")
+
+ // DeploymentStatusKey is the attribute Key conforming to the
+ // "deployment.status" semantic conventions. It represents the status of the
+ // deployment.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ DeploymentStatusKey = attribute.Key("deployment.status")
+)
+
+// DeploymentEnvironmentName returns an attribute KeyValue conforming to the
+// "deployment.environment.name" semantic conventions. It represents the name of
+// the [deployment environment] (aka deployment tier).
+//
+// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+func DeploymentEnvironmentName(val string) attribute.KeyValue {
+ return DeploymentEnvironmentNameKey.String(val)
+}
+
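+ // A minimal sketch of attaching the attribute to an SDK resource (assuming the
+ // `go.opentelemetry.io/otel/sdk/resource` package and a `ctx`; `service.name`
+ // is assumed to be set elsewhere):
+ //
+ //     res, err := resource.New(ctx, resource.WithAttributes(
+ //         DeploymentEnvironmentName("staging"),
+ //     ))
+ //     if err != nil { /* handle */ }
+ //     _ = res
+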
+// DeploymentID returns an attribute KeyValue conforming to the "deployment.id"
+// semantic conventions. It represents the id of the deployment.
+func DeploymentID(val string) attribute.KeyValue {
+ return DeploymentIDKey.String(val)
+}
+
+// DeploymentName returns an attribute KeyValue conforming to the
+// "deployment.name" semantic conventions. It represents the name of the
+// deployment.
+func DeploymentName(val string) attribute.KeyValue {
+ return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+ // failed
+ // Stability: development
+ DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+ // succeeded
+ // Stability: development
+ DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
+
+// Namespace: destination
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the destination
+ // address - domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found
+ // [here].
+ //
+ // > [!WARNING]
+ // > This attribute may contain sensitive (PII) information. Caution should be
+ // > taken when storing personal data or anything which can identify a user.
+ // > GDPR and data protection laws may apply; ensure you do your own due
+ // > diligence.
+ // >
+ // > Due to these reasons, this identifier is not recommended for consumer
+ // > applications and will likely result in rejection from both Google Play and
+ // > App Store. However, it may be appropriate for specific enterprise
+ // > scenarios, such as kiosk devices or enterprise-managed devices, with
+ // > appropriate compliance clearance.
+ // >
+ // > Any instrumentation providing this identifier MUST implement it as an
+ // > opt-in feature.
+ // >
+ // > See [`app.installation.id`] for a more privacy-preserving alternative.
+ //
+ // [here]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apple", "Samsung"
+ // Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+ // hardcode the value `Apple`.
+ //
+ // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone3,4", "SM-G920F"
+ // Note: It's recommended this value represents a machine-readable version of
+ // the model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+ // semantic conventions. It represents the marketing name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+ // Note: It's recommended this value represents a human-readable version of the
+ // device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// Namespace: disk
+const (
+ // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+ // semantic conventions. It represents the disk IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Backslashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
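+ // A minimal sketch of the escaping rule described above (not part of this
+ // package; assumes `fmt` and `strings` are imported):
+ //
+ //     func escapeDNSName(name string) string {
+ //         var b strings.Builder
+ //         for i := 0; i < len(name); i++ {
+ //             switch c := name[i]; {
+ //             case c == '\t':
+ //                 b.WriteString(`\t`)
+ //             case c == '\r':
+ //                 b.WriteString(`\r`)
+ //             case c == '\n':
+ //                 b.WriteString(`\n`)
+ //             case c == '\\' || c == '"':
+ //                 b.WriteByte('\\')
+ //                 b.WriteByte(c)
+ //             case c < 32 || c > 126:
+ //                 fmt.Fprintf(&b, `\%03d`, c) // escaped base 10 integer, e.g. \007
+ //             default:
+ //                 b.WriteByte(c)
+ //             }
+ //         }
+ //         return b.String()
+ //     }
+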
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the represents
+ // the human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the represents
+// the human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+ // the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It represents a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications should be prepared for `error.type` to have high
+ // cardinality at query time when no additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ //
+ // Stability: stable
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
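+ // For example, per the note on `error.type`, an HTTP instrumentation can set
+ // both a domain-specific attribute and `error.type` (a sketch; `span` and the
+ // status code are assumed):
+ //
+ //     span.SetAttributes(
+ //         attribute.Int("http.response.status_code", 500), // domain-specific
+ //         ErrorTypeKey.String("500"),                      // captures all errors
+ //     )
+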
+// Namespace: exception
+const (
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "Division by zero", "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the exception
+ // should be preferred over the static type in languages that support it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "java.net.ConnectException", "OSError"
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the "exception.type"
+// semantic conventions. It represents the type of the exception (its
+// fully-qualified class name, if applicable). The dynamic type of the exception
+// should be preferred over the static type in languages that support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
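+ // A minimal sketch of recording these attributes on a span event (assuming a
+ // Go error `err`, a `span`, and the `fmt` and `trace` packages):
+ //
+ //     span.AddEvent("exception", trace.WithAttributes(
+ //         ExceptionType(fmt.Sprintf("%T", err)),
+ //         ExceptionMessage(err.Error()),
+ //     ))
+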
+// Namespace: faas
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the serverless
+ // function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron Expression].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0/5 * * * ? *
+ //
+ // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name of
+ // the source on which the triggering operation was performed. For example, in
+ // Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
+ // the database name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myBucketName", "myDbName"
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3
+ // this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myFile.txt", "myTableName"
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string containing
+ // the time when the data was accessed in the [ISO 8601] format expressed in
+ // [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a string,
+ // that will be potentially reused for other invocations to the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+ // Note: - **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation ID of
+ // the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+ // semantic conventions. It represents the name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: my-function
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud region of
+ // the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: eu-central-1
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+ // semantic conventions. It represents the amount of memory available to the
+ // serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information (which must be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this runtime
+ // instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-function", "myazurefunctionapp/some-function-name"
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function.name`]
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ //
+ // - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ //
+ //
+ // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation time
+ // in the [ISO 8601] format expressed in [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic
+ // conventions. It represents the type of the trigger which caused this function
+ // invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic
+ // conventions. It represents the immutable version of the function being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "26", "pinkfroid-00002"
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // - **AWS Lambda:** The [function version]
+ // (an integer represented as a decimal string).
+ // - **Google Cloud Run (Services):** The [revision]
+ // (i.e., the function name plus the revision suffix).
+ // - **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment variable].
+ // - **Azure Functions:** Not applicable. Do not set this attribute.
+ //
+ //
+ // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html
+ // [revision]: https://cloud.google.com/run/docs/managing/revisions
+ // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart"
+// semantic conventions. It represents a boolean that is true if the serverless
+// function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic
+// conventions. It represents a string containing the schedule period as
+// [Cron Expression].
+//
+// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of the
+// source on which the triggering operation was performed. For example, in Cloud
+ // Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the
+ // database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3
+ // this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO 8601] format expressed in
+// [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance"
+// semantic conventions. It represents the execution environment ID as a string,
+// that will be potentially reused for other invocations to the same
+// function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+// the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region of
+// the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
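+ // A sketch of deriving the value on AWS Lambda, per the note above (assuming
+ // `os` and `strconv` are imported):
+ //
+ //     if mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+ //         attr := FaaSMaxMemory(mb * 1048576) // Lambda reports MiB; convert to bytes
+ //         _ = attr
+ //     }
+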
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic
+// conventions. It represents the name of the single function that this runtime
+// instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic
+// conventions. It represents a string containing the function invocation time in
+// the [ISO 8601] format expressed in [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the "faas.version"
+// semantic conventions. It represents the immutable version of the function
+// being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Enum values for faas.document.operation
+var (
+ // When a new object is created.
+ // Stability: development
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified.
+ // Stability: development
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted.
+ // Stability: development
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ // Stability: development
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+ // A response to some data source operation such as a database or filesystem
+ // read/write
+ // Stability: development
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ // Stability: development
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ // Stability: development
+ FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ // Stability: development
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ // Stability: development
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Namespace: feature_flag
+const (
+ // FeatureFlagContextIDKey is the attribute Key conforming to the
+ // "feature_flag.context.id" semantic conventions. It represents the unique
+ // identifier for the flag evaluation context. For example, the targeting key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+ FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+ // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+ // semantic conventions. It represents the lookup key of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logo-color"
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider.name" semantic conventions. It represents the
+ // identifies the feature flag provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flag Manager"
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+ // FeatureFlagResultReasonKey is the attribute Key conforming to the
+ // "feature_flag.result.reason" semantic conventions. It represents the reason
+ // code which shows how a feature flag value was determined.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "static", "targeting_match", "error", "default"
+ FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+ // FeatureFlagResultValueKey is the attribute Key conforming to the
+ // "feature_flag.result.value" semantic conventions. It represents the evaluated
+ // value of the feature flag.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "#ff0000", true, 3
+ // Note: With some feature flag providers, feature flag results can be quite
+ // large or contain private or sensitive details.
+ // Because of this, `feature_flag.result.variant` is often the preferred
+ // attribute if it is available.
+ //
+ // It may be desirable to redact or otherwise limit the size and scope of
+ // `feature_flag.result.value` if possible.
+ // Because the evaluated flag value is unstructured and may be any type, it is
+ // left to the instrumentation author to determine how best to achieve this.
+ FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+ // FeatureFlagResultVariantKey is the attribute Key conforming to the
+ // "feature_flag.result.variant" semantic conventions. It represents a semantic
+ // identifier for an evaluated flag value.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "red", "true", "on"
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+ // FeatureFlagSetIDKey is the attribute Key conforming to the
+ // "feature_flag.set.id" semantic conventions. It represents the identifier of
+ // the [flag set] to which the feature flag belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "proj-1", "ab98sgs", "service1/dev"
+ //
+ // [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+ FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+ // FeatureFlagVersionKey is the attribute Key conforming to the
+ // "feature_flag.version" semantic conventions. It represents the version of the
+ // ruleset used during the evaluation. This may be any stable value which
+ // uniquely identifies the ruleset.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "01ABCDEF"
+ FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+ return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It identifies the feature
+// flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+ return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+ return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+ return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+ // The resolved value is static (no dynamic evaluation).
+ // Stability: development
+ FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+ // The resolved value fell back to a pre-configured value (no dynamic evaluation
+ // occurred or dynamic evaluation yielded no result).
+ // Stability: development
+ FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+ // The resolved value was the result of a dynamic evaluation, such as a rule or
+ // specific user-targeting.
+ // Stability: development
+ FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+ // The resolved value was the result of pseudorandom assignment.
+ // Stability: development
+ FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+ // The resolved value was retrieved from cache.
+ // Stability: development
+ FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+ // The resolved value was the result of the flag being disabled in the
+ // management system.
+ // Stability: development
+ FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+ // The reason for the resolved value could not be determined.
+ // Stability: development
+ FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+ // The resolved value is non-authoritative or possibly out of date
+ // Stability: development
+ FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+ // The resolved value was the result of an error.
+ // Stability: development
+ FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
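+
+// A minimal usage sketch (assumptions: an OpenTelemetry span `span` is in
+// scope and the flag values shown are illustrative):
+//
+//	span.SetAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagResultVariant("red"),
+//		FeatureFlagResultReasonTargetingMatch,
+//	)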
+
+// Namespace: file
+const (
+ // FileAccessedKey is the attribute Key conforming to the "file.accessed"
+ // semantic conventions. It represents the time when the file was last accessed,
+ // in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems, e.g. NFS,
+ // FAT32, or embedded OSes.
+ FileAccessedKey = attribute.Key("file.accessed")
+
+ // FileAttributesKey is the attribute Key conforming to the "file.attributes"
+ // semantic conventions. It represents the array of file attributes.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "readonly", "hidden"
+ // Note: Attribute names depend on the OS or file system. Here's a
+ // non-exhaustive list of values expected for this attribute: `archive`,
+ // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+ // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+ // `write`.
+ FileAttributesKey = attribute.Key("file.attributes")
+
+ // FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+ // conventions. It represents the time when the file attributes or metadata was
+ // last changed, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: `file.changed` captures the time when any of the file's properties or
+ // attributes (including the content) are changed, while `file.modified`
+ // captures the timestamp when the file content is modified.
+ FileChangedKey = attribute.Key("file.changed")
+
+ // FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+ // conventions. It represents the time when the file was created, in ISO 8601
+ // format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems, e.g. NFS,
+ // FAT32, or embedded OSes.
+ FileCreatedKey = attribute.Key("file.created")
+
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is located.
+ // It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/user", "C:\Program Files\MyApp"
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the leading
+ // dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+ // semantic conventions. It represents the name of the fork. A fork is
+ // additional data associated with a filesystem object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Zone.Identifer"
+ // Note: On Linux, a resource fork is used to store additional data with a
+ // filesystem object. A file always has at least one fork for the data portion,
+ // and additional forks may exist.
+ // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+ // data stream for a file is just called $DATA. Zone.Identifier is commonly used
+ // by Windows to track contents downloaded from the Internet. An ADS is
+ // typically of the form: C:\path\to\filename.extension:some_fork_name, and
+ // some_fork_name is the value that should populate `fork_name`.
+ // `filename.extension` should populate `file.name`, and `extension` should
+ // populate `file.extension`. The full path, `file.path`, will include the fork
+ // name.
+ FileForkNameKey = attribute.Key("file.fork_name")
+
+ // FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+ // semantic conventions. It represents the primary Group ID (GID) of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileGroupIDKey = attribute.Key("file.group.id")
+
+ // FileGroupNameKey is the attribute Key conforming to the "file.group.name"
+ // semantic conventions. It represents the primary group name of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "users"
+ FileGroupNameKey = attribute.Key("file.group.name")
+
+ // FileInodeKey is the attribute Key conforming to the "file.inode" semantic
+ // conventions. It represents the inode of the file in the filesystem.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "256383"
+ FileInodeKey = attribute.Key("file.inode")
+
+ // FileModeKey is the attribute Key conforming to the "file.mode" semantic
+ // conventions. It represents the mode of the file in octal representation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0640"
+ FileModeKey = attribute.Key("file.mode")
+
+ // FileModifiedKey is the attribute Key conforming to the "file.modified"
+ // semantic conventions. It represents the time when the file content was last
+ // modified, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ FileModifiedKey = attribute.Key("file.modified")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.png"
+ FileNameKey = attribute.Key("file.name")
+
+ // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id"
+ // semantic conventions. It represents the user ID (UID) or security identifier
+ // (SID) of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileOwnerIDKey = attribute.Key("file.owner.id")
+
+ // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name"
+ // semantic conventions. It represents the username of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ FileOwnerNameKey = attribute.Key("file.owner.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe"
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FileSizeKey = attribute.Key("file.size")
+
+ // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the
+ // "file.symbolic_link.target_path" semantic conventions. It represents the path
+ // to the target of a symbolic link.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/python3"
+ // Note: This attribute is only applicable to symbolic links.
+ FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path")
+)
+
+// FileAccessed returns an attribute KeyValue conforming to the "file.accessed"
+// semantic conventions. It represents the time when the file was last accessed,
+// in ISO 8601 format.
+func FileAccessed(val string) attribute.KeyValue {
+ return FileAccessedKey.String(val)
+}
+
+// FileAttributes returns an attribute KeyValue conforming to the
+// "file.attributes" semantic conventions. It represents the array of file
+// attributes.
+func FileAttributes(val ...string) attribute.KeyValue {
+ return FileAttributesKey.StringSlice(val)
+}
+
+// FileChanged returns an attribute KeyValue conforming to the "file.changed"
+// semantic conventions. It represents the time when the file attributes or
+// metadata was last changed, in ISO 8601 format.
+func FileChanged(val string) attribute.KeyValue {
+ return FileChangedKey.String(val)
+}
+
+// FileCreated returns an attribute KeyValue conforming to the "file.created"
+// semantic conventions. It represents the time when the file was created, in ISO
+// 8601 format.
+func FileCreated(val string) attribute.KeyValue {
+ return FileCreatedKey.String(val)
+}
+
+// FileDirectory returns an attribute KeyValue conforming to the "file.directory"
+// semantic conventions. It represents the directory where the file is located.
+// It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the "file.extension"
+// semantic conventions. It represents the file extension, excluding the leading
+// dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileForkName returns an attribute KeyValue conforming to the "file.fork_name"
+// semantic conventions. It represents the name of the fork. A fork is additional
+// data associated with a filesystem object.
+func FileForkName(val string) attribute.KeyValue {
+ return FileForkNameKey.String(val)
+}
+
+// FileGroupID returns an attribute KeyValue conforming to the "file.group.id"
+// semantic conventions. It represents the primary Group ID (GID) of the file.
+func FileGroupID(val string) attribute.KeyValue {
+ return FileGroupIDKey.String(val)
+}
+
+// FileGroupName returns an attribute KeyValue conforming to the
+// "file.group.name" semantic conventions. It represents the primary group name
+// of the file.
+func FileGroupName(val string) attribute.KeyValue {
+ return FileGroupNameKey.String(val)
+}
+
+// FileInode returns an attribute KeyValue conforming to the "file.inode"
+// semantic conventions. It represents the inode of the file in the filesystem.
+func FileInode(val string) attribute.KeyValue {
+ return FileInodeKey.String(val)
+}
+
+// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic
+// conventions. It represents the mode of the file in octal representation.
+func FileMode(val string) attribute.KeyValue {
+ return FileModeKey.String(val)
+}
+
+// FileModified returns an attribute KeyValue conforming to the "file.modified"
+// semantic conventions. It represents the time when the file content was last
+// modified, in ISO 8601 format.
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
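+
+// A minimal usage sketch (assumptions: `span` is an OpenTelemetry span and the
+// file metadata shown is illustrative):
+//
+//	span.SetAttributes(
+//		FilePath("/home/alice/example.png"),
+//		FileName("example.png"),
+//		FileExtension("png"),
+//		FileSize(1024),
+//	)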
+
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+ // criticality of a service indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+ // environment of a service is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+ // the criticality of a workload, which indicates its importance to the
+ // business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+ // the environment of a workload, i.e. the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It represents the identifies the
+ // Google Cloud service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+ // of a GCE instance. This is the full value of the default or
+ // [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It represents the identifies the
+// Google Cloud service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
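+
+// A minimal usage sketch for a Cloud Run job (assumptions: `span` is an
+// OpenTelemetry span; the execution name and task index are illustrative and
+// would normally be read from the CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX
+// environment variables):
+//
+//	span.SetAttributes(
+//		GCPCloudRunJobExecution("sample-job-mdw84"),
+//		GCPCloudRunJobTaskIndex(0),
+//	)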
+
+// Namespace: gen_ai
+const (
+ // GenAIAgentDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.agent.description" semantic conventions. It represents the free-form
+ // description of the GenAI agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Helps with math problems", "Generates fiction stories"
+ GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description")
+
+ // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id"
+ // semantic conventions. It represents the unique identifier of the GenAI agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIAgentIDKey = attribute.Key("gen_ai.agent.id")
+
+ // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name"
+ // semantic conventions. It represents the human-readable name of the GenAI
+ // agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.request.service_tier" semantic conventions. It represents the
+ // service tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
+
+ // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
+
+ // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
+ // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+ // represents a fingerprint to track any eventual change in the Generative AI
+ // environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
+
+ // GenAIOperationNameKey is the attribute Key conforming to the
+ // "gen_ai.operation.name" semantic conventions. It represents the name of the
+ // operation being performed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If one of the predefined values applies, but the specific system uses
+ // a different name, it's RECOMMENDED to document it in the semantic
+ // conventions for the specific GenAI system and to use the system-specific
+ // name in the instrumentation. If a different name is not documented,
+ // instrumentation libraries SHOULD use the applicable predefined value.
+ GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+ // GenAIOutputTypeKey is the attribute Key conforming to the
+ // "gen_ai.output.type" semantic conventions. It represents the represents the
+ // content type requested by the client.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute SHOULD be used when the client requests output of a
+ // specific type. The model may return zero or more outputs of this type.
+ // This attribute specifies the output modality and not the actual output
+ // format. For example, if an image is requested, the actual output could be a
+ // URL pointing to an image file.
+ // Additional output format details may be recorded in the future in the
+ // `gen_ai.output.{type}.*` attributes.
+ GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "base64"], ["float", "binary"
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4"
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the requests with
+ // same seed value more likely to return same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "stop"], ["stop", "length"
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as identified
+ // by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "openai"
+ // Note: The `gen_ai.system` describes a family of GenAI models, with the
+ // specific model identified by the `gen_ai.request.model` and
+ // `gen_ai.response.model` attributes.
+ //
+ // The actual GenAI product may differ from the one identified by the client.
+ // Multiple systems, including Azure OpenAI and Gemini, are accessible via the
+ // OpenAI client libraries. In such cases, `gen_ai.system` is set to `openai`
+ // based on the instrumentation's best knowledge, instead of the actual system.
+ // The `server.address` attribute may help identify the actual system in use
+ // for `openai`.
+ //
+ // For custom models, a custom friendly name SHOULD be used.
+ // If none of these options apply, the `gen_ai.system` SHOULD be set to
+ // `_OTHER`.
+ GenAISystemKey = attribute.Key("gen_ai.system")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+ // tokens used in the GenAI input (prompt).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens")
+
+ // GenAIUsageOutputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.output_tokens" semantic conventions. It represents the number
+ // of tokens used in the GenAI response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 180
+ GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens")
+)
+
+// GenAIAgentDescription returns an attribute KeyValue conforming to the
+// "gen_ai.agent.description" semantic conventions. It represents the free-form
+// description of the GenAI agent provided by the application.
+func GenAIAgentDescription(val string) attribute.KeyValue {
+ return GenAIAgentDescriptionKey.String(val)
+}
+
+// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id"
+// semantic conventions. It represents the unique identifier of the GenAI agent.
+func GenAIAgentID(val string) attribute.KeyValue {
+ return GenAIAgentIDKey.String(val)
+}
+
+// GenAIAgentName returns an attribute KeyValue conforming to the
+// "gen_ai.agent.name" semantic conventions. It represents the human-readable
+// name of the GenAI agent provided by the application.
+func GenAIAgentName(val string) attribute.KeyValue {
+ return GenAIAgentNameKey.String(val)
+}
+
+// GenAIConversationID returns an attribute KeyValue conforming to the
+// "gen_ai.conversation.id" semantic conventions. It represents the unique
+// identifier for a conversation (session, thread), used to store and correlate
+// messages within this conversation.
+func GenAIConversationID(val string) attribute.KeyValue {
+ return GenAIConversationIDKey.String(val)
+}
+
+// GenAIDataSourceID returns an attribute KeyValue conforming to the
+// "gen_ai.data_source.id" semantic conventions. It represents the data source
+// identifier.
+func GenAIDataSourceID(val string) attribute.KeyValue {
+ return GenAIDataSourceIDKey.String(val)
+}
+
+// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the
+// "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+// service tier used for the response.
+func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue {
+ return GenAIOpenAIResponseServiceTierKey.String(val)
+}
+
+// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming
+// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+// represents a fingerprint to track any eventual change in the Generative AI
+// environment.
+func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue {
+ return GenAIOpenAIResponseSystemFingerprintKey.String(val)
+}
+
+// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the
+// "gen_ai.request.choice.count" semantic conventions. It represents the target
+// number of candidate completions to return.
+func GenAIRequestChoiceCount(val int) attribute.KeyValue {
+ return GenAIRequestChoiceCountKey.Int(val)
+}
+
+// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the
+// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+// encoding formats requested in an embeddings operation, if specified.
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the requests with
+// same seed value more likely to return same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
+func GenAIResponseID(val string) attribute.KeyValue {
+ return GenAIResponseIDKey.String(val)
+}
+
+// GenAIResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// model that generated the response.
+func GenAIResponseModel(val string) attribute.KeyValue {
+ return GenAIResponseModelKey.String(val)
+}
+
+// GenAIToolCallID returns an attribute KeyValue conforming to the
+// "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+// identifier.
+func GenAIToolCallID(val string) attribute.KeyValue {
+ return GenAIToolCallIDKey.String(val)
+}
+
+// GenAIToolDescription returns an attribute KeyValue conforming to the
+// "gen_ai.tool.description" semantic conventions. It represents the tool
+// description.
+func GenAIToolDescription(val string) attribute.KeyValue {
+ return GenAIToolDescriptionKey.String(val)
+}
+
+// GenAIToolName returns an attribute KeyValue conforming to the
+// "gen_ai.tool.name" semantic conventions. It represents the name of the tool
+// utilized by the agent.
+func GenAIToolName(val string) attribute.KeyValue {
+ return GenAIToolNameKey.String(val)
+}
+
+// GenAIToolType returns an attribute KeyValue conforming to the
+// "gen_ai.tool.type" semantic conventions. It represents the type of the tool
+// utilized by the agent.
+func GenAIToolType(val string) attribute.KeyValue {
+ return GenAIToolTypeKey.String(val)
+}
+
+// GenAIUsageInputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI input (prompt).
+func GenAIUsageInputTokens(val int) attribute.KeyValue {
+ return GenAIUsageInputTokensKey.Int(val)
+}
+
+// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI response (completion).
+func GenAIUsageOutputTokens(val int) attribute.KeyValue {
+ return GenAIUsageOutputTokensKey.Int(val)
+}
+
+// Enum values for gen_ai.openai.request.service_tier
+var (
+ // The system will utilize scale tier credits until they are exhausted.
+ // Stability: development
+ GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto")
+ // The system will utilize the default scale tier.
+ // Stability: development
+ GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default")
+)
+
+// Enum values for gen_ai.operation.name
+var (
+ // Chat completion operation such as [OpenAI Chat API]
+ // Stability: development
+ //
+ // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat
+ GenAIOperationNameChat = GenAIOperationNameKey.String("chat")
+ // Multimodal content generation operation such as [Gemini Generate Content]
+ // Stability: development
+ //
+ // [Gemini Generate Content]: https://ai.google.dev/api/generate-content
+ GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content")
+ // Text completions operation such as [OpenAI Completions API (Legacy)]
+ // Stability: development
+ //
+ // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions
+ GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion")
+ // Embeddings operation such as [OpenAI Create embeddings API]
+ // Stability: development
+ //
+ // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create
+ GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings")
+ // Create GenAI agent
+ // Stability: development
+ GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent")
+ // Invoke GenAI agent
+ // Stability: development
+ GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent")
+ // Execute a tool
+ // Stability: development
+ GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool")
+)
+
+// Enum values for gen_ai.output.type
+var (
+ // Plain text
+ // Stability: development
+ GenAIOutputTypeText = GenAIOutputTypeKey.String("text")
+ // JSON object with known or unknown schema
+ // Stability: development
+ GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json")
+ // Image
+ // Stability: development
+ GenAIOutputTypeImage = GenAIOutputTypeKey.String("image")
+ // Speech
+ // Stability: development
+ GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech")
+)
+
+// Enum values for gen_ai.system
+var (
+ // OpenAI
+ // Stability: development
+ GenAISystemOpenAI = GenAISystemKey.String("openai")
+ // Any Google generative AI endpoint
+ // Stability: development
+ GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai")
+ // Vertex AI
+ // Stability: development
+ GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai")
+ // Gemini
+ // Stability: development
+ GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini")
+ // Deprecated: Use 'gcp.vertex_ai' instead.
+ GenAISystemVertexAI = GenAISystemKey.String("vertex_ai")
+ // Deprecated: Use 'gcp.gemini' instead.
+ GenAISystemGemini = GenAISystemKey.String("gemini")
+ // Anthropic
+ // Stability: development
+ GenAISystemAnthropic = GenAISystemKey.String("anthropic")
+ // Cohere
+ // Stability: development
+ GenAISystemCohere = GenAISystemKey.String("cohere")
+ // Azure AI Inference
+ // Stability: development
+ GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference")
+ // Azure OpenAI
+ // Stability: development
+ GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai")
+ // IBM Watsonx AI
+ // Stability: development
+ GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai")
+ // AWS Bedrock
+ // Stability: development
+ GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock")
+ // Perplexity
+ // Stability: development
+ GenAISystemPerplexity = GenAISystemKey.String("perplexity")
+ // xAI
+ // Stability: development
+ GenAISystemXai = GenAISystemKey.String("xai")
+ // DeepSeek
+ // Stability: development
+ GenAISystemDeepseek = GenAISystemKey.String("deepseek")
+ // Groq
+ // Stability: development
+ GenAISystemGroq = GenAISystemKey.String("groq")
+ // Mistral AI
+ // Stability: development
+ GenAISystemMistralAI = GenAISystemKey.String("mistral_ai")
+)
+
+// Enum values for gen_ai.token.type
+var (
+ // Input tokens (prompt, input, etc.)
+ // Stability: development
+ GenAITokenTypeInput = GenAITokenTypeKey.String("input")
+ // Deprecated: Replaced by `output`.
+ GenAITokenTypeCompletion = GenAITokenTypeKey.String("output")
+ // Output tokens (completion, response, etc.)
+ // Stability: development
+ GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
+)
+
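+// Usage sketch (illustrative, not part of the generated conventions):
+// recording token usage on a chat span. The span variable is assumed to come
+// from the caller's tracer; only the attribute constructors and enum values
+// are defined in this package.
+//
+//	span.SetAttributes(
+//		GenAIOperationNameChat,
+//		GenAISystemOpenAI,
+//		GenAIUsageInputTokens(113),
+//		GenAIUsageOutputTokens(42),
+//	)
+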
+// Namespace: geo
+const (
+ // GeoContinentCodeKey is the attribute Key conforming to the
+ // "geo.continent.code" semantic conventions. It represents the two-letter code
+ // representing the continent’s name.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ GeoContinentCodeKey = attribute.Key("geo.continent.code")
+
+ // GeoCountryISOCodeKey is the attribute Key conforming to the
+ // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+ // Country Code ([ISO 3166-1 alpha2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA"
+ //
+ // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+ GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code")
+
+ // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name"
+ // semantic conventions. It represents the locality name, i.e. the name of a
+ // city, town, village, or similar populated place.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Montreal", "Berlin"
+ GeoLocalityNameKey = attribute.Key("geo.locality.name")
+
+ // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat"
+ // semantic conventions. It represents the latitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 45.505918
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLatKey = attribute.Key("geo.location.lat")
+
+ // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon"
+ // semantic conventions. It represents the longitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -73.61483
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLonKey = attribute.Key("geo.location.lon")
+
+ // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code"
+ // semantic conventions. It represents the postal code associated with the
+ // location. Values appropriate for this field may also be known as a postcode
+ // or ZIP code and will vary widely from country to country.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "94040"
+ GeoPostalCodeKey = attribute.Key("geo.postal_code")
+
+ // GeoRegionISOCodeKey is the attribute Key conforming to the
+ // "geo.region.iso_code" semantic conventions. It represents the region ISO code
+ // ([ISO 3166-2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA-QC"
+ //
+ // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+ GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code")
+)
+
+// GeoCountryISOCode returns an attribute KeyValue conforming to the
+// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+// Country Code ([ISO 3166-1 alpha2]).
+//
+// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+func GeoCountryISOCode(val string) attribute.KeyValue {
+ return GeoCountryISOCodeKey.String(val)
+}
+
+// GeoLocalityName returns an attribute KeyValue conforming to the
+// "geo.locality.name" semantic conventions. It represents the locality name.
+// Represents the name of a city, town, village, or similar populated place.
+func GeoLocalityName(val string) attribute.KeyValue {
+ return GeoLocalityNameKey.String(val)
+}
+
+// GeoLocationLat returns an attribute KeyValue conforming to the
+// "geo.location.lat" semantic conventions. It represents the latitude of the geo
+// location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLat(val float64) attribute.KeyValue {
+ return GeoLocationLatKey.Float64(val)
+}
+
+// GeoLocationLon returns an attribute KeyValue conforming to the
+// "geo.location.lon" semantic conventions. It represents the longitude of the
+// geo location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLon(val float64) attribute.KeyValue {
+ return GeoLocationLonKey.Float64(val)
+}
+
+// GeoPostalCode returns an attribute KeyValue conforming to the
+// "geo.postal_code" semantic conventions. It represents the postal code
+// associated with the location. Values appropriate for this field may also be
+// known as a postcode or ZIP code and will vary widely from country to country.
+func GeoPostalCode(val string) attribute.KeyValue {
+ return GeoPostalCodeKey.String(val)
+}
+
+// GeoRegionISOCode returns an attribute KeyValue conforming to the
+// "geo.region.iso_code" semantic conventions. It represents the region ISO code
+// ([ISO 3166-2]).
+//
+// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+func GeoRegionISOCode(val string) attribute.KeyValue {
+ return GeoRegionISOCodeKey.String(val)
+}
+
+// Enum values for geo.continent.code
+var (
+ // Africa
+ // Stability: development
+ GeoContinentCodeAf = GeoContinentCodeKey.String("AF")
+ // Antarctica
+ // Stability: development
+ GeoContinentCodeAn = GeoContinentCodeKey.String("AN")
+ // Asia
+ // Stability: development
+ GeoContinentCodeAs = GeoContinentCodeKey.String("AS")
+ // Europe
+ // Stability: development
+ GeoContinentCodeEu = GeoContinentCodeKey.String("EU")
+ // North America
+ // Stability: development
+ GeoContinentCodeNa = GeoContinentCodeKey.String("NA")
+ // Oceania
+ // Stability: development
+ GeoContinentCodeOc = GeoContinentCodeKey.String("OC")
+ // South America
+ // Stability: development
+ GeoContinentCodeSa = GeoContinentCodeKey.String("SA")
+)
+
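+// Usage sketch (illustrative): describing a resolved location with the geo.*
+// attributes. The concrete values reuse the examples above.
+//
+//	attrs := []attribute.KeyValue{
+//		GeoContinentCodeNa,
+//		GeoCountryISOCode("CA"),
+//		GeoRegionISOCode("CA-QC"),
+//		GeoLocalityName("Montreal"),
+//		GeoLocationLat(45.505918),
+//		GeoLocationLon(-73.61483),
+//	}
+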
+// Namespace: go
+const (
+ // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type"
+ // semantic conventions. It represents the type of memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "other", "stack"
+ GoMemoryTypeKey = attribute.Key("go.memory.type")
+)
+
+// Enum values for go.memory.type
+var (
+ // Memory allocated from the heap that is reserved for stack space, whether or
+ // not it is currently in-use.
+ // Stability: development
+ GoMemoryTypeStack = GoMemoryTypeKey.String("stack")
+ // Memory used by the Go runtime, excluding other categories of memory usage
+ // described in this enumeration.
+ // Stability: development
+ GoMemoryTypeOther = GoMemoryTypeKey.String("other")
+)
+
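+// Usage sketch (illustrative): go.memory.type is typically attached to
+// runtime memory metrics rather than spans. The memUsage instrument and the
+// stackBytes value are assumptions about the caller's setup, not something
+// this package defines.
+//
+//	memUsage.Add(ctx, stackBytes,
+//		metric.WithAttributes(GoMemoryTypeStack))
+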
+// Namespace: graphql
+const (
+ // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document"
+ // semantic conventions. It represents the GraphQL document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: query findBookById { bookById(id: ?) { name } }
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphQLDocumentKey = attribute.Key("graphql.document")
+
+ // GraphQLOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of the
+ // operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: findBookById
+ GraphQLOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphQLOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of the
+ // operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "query", "mutation", "subscription"
+ GraphQLOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+// GraphQLDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphQLDocument(val string) attribute.KeyValue {
+ return GraphQLDocumentKey.String(val)
+}
+
+// GraphQLOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphQLOperationName(val string) attribute.KeyValue {
+ return GraphQLOperationNameKey.String(val)
+}
+
+// Enum values for graphql.operation.type
+var (
+ // GraphQL query
+ // Stability: development
+ GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query")
+ // GraphQL mutation
+ // Stability: development
+ GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ // Stability: development
+ GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription")
+)
+
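+// Usage sketch (illustrative): annotating a GraphQL execution span with the
+// operation type, name, and (possibly sanitized) document.
+//
+//	span.SetAttributes(
+//		GraphQLOperationTypeQuery,
+//		GraphQLOperationName("findBookById"),
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)
+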
+// Namespace: heroku
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da"
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit hash
+ // for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "e6134959463efd8966b20e75b913cafe3f5ec"
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents the
+ // time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2022-10-23T18:00:42Z"
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id"
+// semantic conventions. It represents the unique identifier for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the
+// "heroku.release.creation_timestamp" semantic conventions. It represents the
+// time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
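+// Usage sketch (illustrative): Heroku attributes are resource-level. The dyno
+// metadata environment variable names below are assumptions about the
+// caller's environment, not something this package defines.
+//
+//	attrs := []attribute.KeyValue{
+//		HerokuAppID(os.Getenv("HEROKU_APP_ID")),
+//		HerokuReleaseCommit(os.Getenv("HEROKU_SLUG_COMMIT")),
+//		HerokuReleaseCreationTimestamp(os.Getenv("HEROKU_RELEASE_CREATED_AT")),
+//	}
+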
+// Namespace: host
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is running
+ // on.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+ // level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family"
+ // semantic conventions. It represents the family or generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "PA-RISC 1.1e"
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id"
+ // semantic conventions. It represents the model identifier. It provides more
+ // granular information about the CPU, distinguishing it from other CPUs within
+ // the same family.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "9000/778/B180L"
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping"
+ // semantic conventions. It represents the stepping or core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "r1p1"
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "GenuineIntel"
+ // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX
+ // registers. Writing these to memory in this order results in a 12-character
+ // string.
+ //
+ // [CPUID]: https://wiki.osdev.org/CPUID
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be the
+ // instance_id assigned by the cloud provider. For non-containerized systems,
+ // this should be the `machine-id`. See the table below for the sources to use
+ // to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fdbf79e8af94cb7f9e8df36789187052"
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID. For
+ // Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ami-07b06b442921831e5"
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the "host.image.name"
+ // semantic conventions. It represents the name of the VM image or OS install
+ // the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905"
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version string
+ // of the VM image or host OS as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e"
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC 5952] format.
+ //
+ // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F"
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as
+ // hyphen-separated octets in uppercase hexadecimal form from most to least
+ // significant.
+ //
+ // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the `hostname` command returns, or the fully qualified hostname,
+ // or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-test"
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "n1-standard-1"
+ HostTypeKey = attribute.Key("host.type")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or generation
+// of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model identifier.
+// It provides more granular information about the CPU, distinguishing it from
+// other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use to
+// determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the "host.image.id"
+// semantic conventions. It represents the VM image ID or host OS image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM image
+// or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string of
+// the VM image or host OS as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic
+// conventions. It represents the available MAC addresses of the host, excluding
+// loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name" semantic
+// conventions. It represents the name of the host. On Unix systems, it may
+// contain what the `hostname` command returns, or the fully qualified hostname, or
+// another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type" semantic
+// conventions. It represents the type of host. For Cloud, this must be the
+// machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Enum values for host.arch
+var (
+ // AMD64
+ // Stability: development
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ // Stability: development
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ // Stability: development
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ // Stability: development
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ // Stability: development
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ // Stability: development
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ // Stability: development
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ // Stability: development
+ HostArchX86 = HostArchKey.String("x86")
+)
+
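+// Usage sketch (illustrative): host attributes usually describe a resource.
+// resource.NewWithAttributes and the package-level SchemaURL constant come
+// from the wider SDK and are assumed to be available to the caller.
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostType("n1-standard-1"),
+//	)
+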
+// Namespace: http
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of the
+ // HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "idle"
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of the
+ // request payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP request
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GET", "POST", "HEAD"
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // [RFC9110]
+ // and the PATCH method defined in [RFC5789].
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set the
+ // `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods, it is not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to
+ // be case-insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ //
+ // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods
+ // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GeT", "ACL", "foo"
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the ordinal
+ // number of the request resend attempt (for any reason, including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues,
+ // or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size"
+ // semantic conventions. It represents the total size of the request in bytes.
+ // This should be the total number of bytes sent over the wire, including the
+ // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request
+ // body if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size of the
+ // response payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size of
+ // the response in bytes. This should be the total number of bytes sent over the
+ // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+ // headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status code].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 200
+ //
+ // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic
+ // conventions. It represents the matched route, that is, the path template in
+ // the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/users/:userID?", "{controller}/{action}/{id?}"
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+ // framework, as the route attribute should have low cardinality and the URI
+ // path can NOT substitute for it.
+ // SHOULD include the [application root] if there is one.
+ //
+ // [application root]: /docs/http/http-spans.md#http-server-definitions
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resend attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Enum values for http.connection.state
+var (
+ // active state.
+ // Stability: development
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state.
+ // Stability: development
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+// Enum values for http.request.method
+var (
+ // CONNECT method.
+ // Stability: stable
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method.
+ // Stability: stable
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method.
+ // Stability: stable
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method.
+ // Stability: stable
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method.
+ // Stability: stable
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method.
+ // Stability: stable
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method.
+ // Stability: stable
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method.
+ // Stability: stable
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method.
+ // Stability: stable
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of.
+ // Stability: stable
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
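+// Usage sketch (illustrative): a server instrumentation recording the stable
+// HTTP attributes on a request span.
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodGet,
+//		HTTPRoute("/users/{id}"),
+//		HTTPResponseStatusCode(200),
+//	)
+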
+// Namespace: hw
+const (
+ // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions.
+ // It represents an identifier for the hardware component, unique within the
+ // monitored host.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "win32battery_battery_testsysa33_1"
+ HwIDKey = attribute.Key("hw.id")
+
+ // HwNameKey is the attribute Key conforming to the "hw.name" semantic
+ // conventions. It represents an easily-recognizable name for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "eth0"
+ HwNameKey = attribute.Key("hw.name")
+
+ // HwParentKey is the attribute Key conforming to the "hw.parent" semantic
+ // conventions. It represents the unique identifier of the parent component
+ // (typically the `hw.id` attribute of the enclosure, or disk controller).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dellStorage_perc_0"
+ HwParentKey = attribute.Key("hw.parent")
+
+ // HwStateKey is the attribute Key conforming to the "hw.state" semantic
+ // conventions. It represents the current state of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HwStateKey = attribute.Key("hw.state")
+
+ // HwTypeKey is the attribute Key conforming to the "hw.type" semantic
+ // conventions. It represents the type of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+)
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// Enum values for hw.state
+var (
+ // Ok
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
+
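+// Usage sketch (illustrative): hw.type pairs with hw.state to report the
+// health of a specific component; the ID reuses the example above.
+//
+//	span.SetAttributes(
+//		HwID("win32battery_battery_testsysa33_1"),
+//		HwTypeBattery,
+//		HwStateDegraded,
+//	)
+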
+// Namespace: ios
+const (
+ // IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: The iOS lifecycle states are defined in the
+ // [UIApplicationDelegate documentation], and from which the `OS terminology`
+ // column values are derived.
+ //
+ // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate
+ IOSAppStateKey = attribute.Key("ios.app.state")
+)
+
+// Enum values for ios.app.state
+var (
+ // The app has become `active`. Associated with UIKit notification
+ // `applicationDidBecomeActive`.
+ //
+ // Stability: development
+ IOSAppStateActive = IOSAppStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification
+ // `applicationWillResignActive`.
+ //
+ // Stability: development
+ IOSAppStateInactive = IOSAppStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit
+ // notification `applicationDidEnterBackground`.
+ //
+ // Stability: development
+ IOSAppStateBackground = IOSAppStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit
+ // notification `applicationWillEnterForeground`.
+ //
+ // Stability: development
+ IOSAppStateForeground = IOSAppStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification
+ // `applicationWillTerminate`.
+ //
+ // Stability: development
+ IOSAppStateTerminate = IOSAppStateKey.String("terminate")
+)
+
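+// Usage sketch (illustrative): the ios.app.state value is typically attached
+// to a lifecycle event emitted when the corresponding UIKit notification
+// fires. trace.WithAttributes comes from the trace package and is assumed to
+// be imported by the caller.
+//
+//	span.AddEvent("app.lifecycle",
+//		trace.WithAttributes(IOSAppStateBackground))
+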
+// Namespace: k8s
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name"
+ // semantic conventions. It represents the name of the cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-cluster"
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid"
+ // semantic conventions. It represents a pseudo-ID for the cluster, set to the
+ // UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8s cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8s ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T X.667],
+ // which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // > different from all other UUIDs generated before 3603 A.D., or is
+ // > extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ //
+ // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "redis"
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the number
+ // of times the container was restarted. This attribute can be used to identify
+ // a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to
+ // the "k8s.container.status.last_terminated_reason" semantic conventions. It
+ // represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Evicted", "Error"
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
+ // semantic conventions. It represents the name of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid"
+ // semantic conventions. It represents the UID of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid"
+ // semantic conventions. It represents the UID of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
+ // conventions. It represents the name of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SHPANameKey = attribute.Key("k8s.hpa.name")
+
+ // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic
+ // conventions. It represents the UID of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SHPAUIDKey = attribute.Key("k8s.hpa.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic
+ // conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic
+ // conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "default"
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNamespacePhaseKey is the attribute Key conforming to the
+ // "k8s.namespace.phase" semantic conventions. It represents the phase of the
+ // K8s namespace.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "terminating"
+ // Note: This attribute aligns with the `phase` field of the
+ // [K8s NamespaceStatus]
+ //
+ // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core
+ K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "node-1"
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic
+ // conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2"
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic
+ // conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-pod-autoconf"
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic
+ // conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicationControllerNameKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.name" semantic conventions. It represents the name
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name")
+
+ // K8SReplicationControllerUIDKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid")
+
+ // K8SResourceQuotaNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.name" semantic conventions. It represents the name of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name")
+
+ // K8SResourceQuotaUIDKey is the attribute Key conforming to the
+ // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
+ // semantic conventions. It represents the name of the K8s volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "volume0"
+ K8SVolumeNameKey = attribute.Key("k8s.volume.name")
+
+ // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type"
+ // semantic conventions. It represents the type of the K8s volume.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
+// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue {
+ return K8SHPANameKey.String(val)
+}
+
+// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
+// semantic conventions. It represents the UID of the horizontal pod autoscaler.
+func K8SHPAUID(val string) attribute.KeyValue {
+ return K8SHPAUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue {
+ return K8SReplicationControllerNameKey.String(val)
+}
+
+// K8SReplicationControllerUID returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of
+// the replication controller.
+func K8SReplicationControllerUID(val string) attribute.KeyValue {
+ return K8SReplicationControllerUIDKey.String(val)
+}
+
+// K8SResourceQuotaName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.name" semantic conventions. It represents the name of the
+// resource quota.
+func K8SResourceQuotaName(val string) attribute.KeyValue {
+ return K8SResourceQuotaNameKey.String(val)
+}
+
+// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+// resource quota.
+func K8SResourceQuotaUID(val string) attribute.KeyValue {
+ return K8SResourceQuotaUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SVolumeName returns an attribute KeyValue conforming to the
+// "k8s.volume.name" semantic conventions. It represents the name of the K8s
+// volume.
+func K8SVolumeName(val string) attribute.KeyValue {
+ return K8SVolumeNameKey.String(val)
+}
+
+// Enum values for k8s.namespace.phase
+var (
+ // Active namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active")
+ // Terminating namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating")
+)
+
+// Enum values for k8s.volume.type
+var (
+ // A [persistentVolumeClaim] volume
+ // Stability: development
+ //
+ // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim
+ K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim")
+ // A [configMap] volume
+ // Stability: development
+ //
+ // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap
+ K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap")
+ // A [downwardAPI] volume
+ // Stability: development
+ //
+ // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi
+ K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI")
+ // An [emptyDir] volume
+ // Stability: development
+ //
+ // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir
+ K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir")
+ // A [secret] volume
+ // Stability: development
+ //
+ // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret
+ K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret")
+ // A [local] volume
+ // Stability: development
+ //
+ // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local
+ K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local")
+)
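+
+// Usage sketch (illustrative, not part of the generated API): from a consuming
+// package, the helper functions and enum values above yield plain
+// attribute.KeyValue pairs, typically attached to a Resource describing the
+// pod a process runs in. The import alias, semconv version, and concrete
+// values below are assumptions.
+//
+//	import (
+//	    "go.opentelemetry.io/otel/sdk/resource"
+//	    semconv "go.opentelemetry.io/otel/semconv/v1.30.0" // version assumed
+//	)
+//
+//	res := resource.NewWithAttributes(
+//	    semconv.SchemaURL,
+//	    semconv.K8SNamespaceName("payments"),
+//	    semconv.K8SPodName("payments-7d9f6c7b5d-abcde"),
+//	    semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//	    semconv.K8SVolumeTypeEmptyDir, // enum values are used directly
+//	)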
+
+// Namespace: linux
+const (
+ // LinuxMemorySlabStateKey is the attribute Key conforming to the
+ // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab
+ // memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "reclaimable", "unreclaimable"
+ LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state")
+)
+
+// Enum values for linux.memory.slab.state
+var (
+ // reclaimable
+ // Stability: development
+ LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable")
+ // unreclaimable
+ // Stability: development
+ LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable")
+)
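+
+// Usage sketch (illustrative only): the slab-state enum values are ordinary
+// attribute.KeyValue pairs and can be passed as measurement attributes. The
+// meter, instrument name, and measured value are assumptions.
+//
+//	slabUsage, _ := meter.Int64Gauge("system.linux.memory.slab.usage")
+//	slabUsage.Record(ctx, reclaimableBytes,
+//	    metric.WithAttributes(LinuxMemorySlabStateReclaimable))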
+
+// Namespace: log
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "audit.log"
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the basename of
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "uuid.log"
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/log/mysql/audit.log"
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full path to
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/lib/docker/uuid.log"
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic
+ // conventions. It represents the stream associated with the log. See below for
+ // a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ LogIostreamKey = attribute.Key("log.iostream")
+
+ // LogRecordOriginalKey is the attribute Key conforming to the
+ // "log.record.original" semantic conventions. It represents the complete
+ // original Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - -
+ // Something happened", "[INFO] 8/3/24 12:34:56 Something happened"
+ // Note: This value MAY be added when processing a Log Record which was
+ // originally transmitted as a string or equivalent data type AND the Body field
+// of the Log Record does not contain the same value (e.g., a syslog message or
+// a log record read from a file).
+ LogRecordOriginalKey = attribute.Key("log.record.original")
+
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV"
+ // Note: If an id is provided, other log records with the same id will be
+// considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
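+
+// Usage sketch (illustrative only): log file attributes compose into an
+// attribute.Set like any other KeyValue pairs. The values are assumptions.
+//
+//	set := attribute.NewSet(
+//	    LogFileName("audit.log"),
+//	    LogFilePath("/var/log/mysql/audit.log"),
+//	    LogIostreamStdout,
+//	)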
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "client-5", "myhost@8742@s8083jm"
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingConsumerGroupNameKey is the attribute Key conforming to the
+ // "messaging.consumer.group.name" semantic conventions. It represents the name
+ // of the consumer group with which a consumer is associated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-group", "indexer"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.consumer.group.name` is applicable and what it means in
+ // the context of that system.
+ MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+// unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the message
+ // destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MyQueue", "MyTopic"
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If
+// the broker doesn't have such a notion, the destination name SHOULD uniquely
+ // identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to the
+ // "messaging.destination.partition.id" semantic conventions. It represents the
+ // identifier of the partition messages are sent to or received from, unique
+ // within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to
+ // the "messaging.destination.subscription.name" semantic conventions. It
+ // represents the name of the destination subscription from which a message is
+ // consumed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "subscription-a"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.destination.subscription.name` is applicable and what it
+ // means in the context of that system.
+ MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/customers/{customerId}"
+ // Note: Destination names could be constructed from templates. An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the message
+ // keys in Kafka are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message.id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+// Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+// Note: This can refer to either the compressed or the uncompressed body size.
+// If both sizes are known, the uncompressed body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+// Note: This can refer to either the compressed or the uncompressed size. If
+// both sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the rabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+// represents the delay time level for a delay message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+// expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents the it
+ // is essential for FIFO message. Messages that belong to the same message group
+ // are always processed one by one within the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+// describes the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to the
+// "messaging.batch.message_count" semantic conventions. It represents the number
+// of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique identifier
+// for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingConsumerGroupName returns an attribute KeyValue conforming to the
+// "messaging.consumer.group.name" semantic conventions. It represents the name
+// of the consumer group with which a consumer is associated.
+func MessagingConsumerGroupName(val string) attribute.KeyValue {
+ return MessagingConsumerGroupNameKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the
+// "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be unnamed
+// or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming to
+// the "messaging.destination.partition.id" semantic conventions. It represents
+// the identifier of the partition messages are sent to or received from, unique
+// within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming
+// to the "messaging.destination.subscription.name" semantic conventions. It
+// represents the name of the destination subscription from which a message is
+// consumed.
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to the
+// "messaging.destination.template" semantic conventions. It represents the low
+// cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to the
+// "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka are used for grouping alike messages to ensure they're processed
+// on the same partition. They differ from `messaging.message.id` in that they're
+// not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the rabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the it
+// is essential for FIFO message. Messages that belong to the same message group
+// are always processed one by one within the same consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message id.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+ // Deprecated: Replaced by `process`.
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver")
+ // Deprecated: Replaced by `send`.
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+)
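+
+// Usage sketch (illustrative only): per the notes above, a batch send span
+// carries the "send" operation type together with the batch message count,
+// while single-message sends omit the count. The tracer, span name, and
+// values are assumptions.
+//
+//	ctx, span := tracer.Start(ctx, "send orders",
+//	    trace.WithSpanKind(trace.SpanKindProducer),
+//	    trace.WithAttributes(
+//	        MessagingOperationTypeSend,
+//	        MessagingOperationName("send"),
+//	        MessagingBatchMessageCount(3),
+//	    ))
+//	defer span.End()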
+
+// Enum values for messaging.rocketmq.consumption_model
+var (
+ // Clustering consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting")
+)
+
+// Enum values for messaging.rocketmq.message.type
+var (
+ // Normal message
+ // Stability: development
+ MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal")
+ // FIFO message
+ // Stability: development
+ MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo")
+ // Delay message
+ // Stability: development
+ MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay")
+ // Transaction message
+ // Stability: development
+ MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction")
+)
+
+// Enum values for messaging.servicebus.disposition_status
+var (
+ // Message is completed
+ // Stability: development
+ MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ // Stability: development
+ MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ // Stability: development
+ MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ // Stability: development
+ MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer")
+)
+
+// Enum values for messaging.system
+var (
+ // Apache ActiveMQ
+ // Stability: development
+ MessagingSystemActiveMQ = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ // Stability: development
+ MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ // Stability: development
+ MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ // Stability: development
+ MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ // Stability: development
+ MessagingSystemServiceBus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ // Stability: development
+ MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ // Stability: development
+ MessagingSystemJMS = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ // Stability: development
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ // Stability: development
+ MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ // Stability: development
+ MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq")
+ // Apache Pulsar
+ // Stability: development
+ MessagingSystemPulsar = MessagingSystemKey.String("pulsar")
+)
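+
+// Usage sketch (illustrative only): a Kafka producer span might combine the
+// system enum with the destination helpers defined above. The span variable
+// and values are assumptions.
+//
+//	span.SetAttributes(
+//	    MessagingSystemKafka,
+//	    MessagingDestinationName("orders"),
+//	    MessagingDestinationPartitionID("1"),
+//	    MessagingKafkaOffset(42),
+//	)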
+
+// Namespace: network
+const (
+ // NetworkCarrierICCKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+// the network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+// Note: Connection states are defined as part of [rfc9293].
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the this
+ // describes more details regarding the connection.type. It may be the type of
+ // cell technology connection, but it could be used for describing details about
+ // a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port"
+ // semantic conventions. It represents the peer port number of the network
+ // connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the
+ // [OSI application layer] or non-OSI equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "amqp", "http", "mqtt"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the actual
+ // version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.1", "2"
+ // Note: If protocol version is subject to negotiation (for example using [ALPN]
+ // ), this attribute SHOULD be set to the negotiated version. If the actual
+ // protocol version is not known, this attribute SHOULD NOT be set.
+ //
+ // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the
+ // [OSI transport layer] or [inter-process communication method].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "tcp", "udp"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port 12345.
+ //
+ // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer
+ // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic
+ // conventions. It represents the [OSI network layer] or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "ipv4", "ipv6"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI network layer]: https://wikipedia.org/wiki/Network_layer
+ NetworkTypeKey = attribute.Key("network.type")
+)
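+
+// A port number alone is ambiguous, so the transport is typically recorded
+// together with it (see the "network.transport" note above). A minimal usage
+// sketch, assuming span is an already-started trace.Span:
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(12345),
+//	)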
+
+// NetworkCarrierICC returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierICC(val string) attribute.KeyValue {
+ return NetworkCarrierICCKey.String(val)
+}
+
+// NetworkCarrierMCC returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMCC(val string) attribute.KeyValue {
+ return NetworkCarrierMCCKey.String(val)
+}
+
+// NetworkCarrierMNC returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMNC(val string) attribute.KeyValue {
+ return NetworkCarrierMNCKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkInterfaceName returns an attribute KeyValue conforming to the
+// "network.interface.name" semantic conventions. It represents the network
+// interface name.
+func NetworkInterfaceName(val string) attribute.KeyValue {
+ return NetworkInterfaceNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port number
+// of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
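+
+// Per the note on "network.protocol.version" above, this attribute SHOULD NOT
+// be set when the negotiated version is unknown. A minimal sketch, assuming
+// version holds the negotiated protocol version (empty when unknown):
+//
+//	if version != "" {
+//		span.SetAttributes(NetworkProtocolVersion(version))
+//	}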
+
+// Enum values for network.connection.state
+var (
+ // closed
+ // Stability: development
+ NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed")
+ // close_wait
+ // Stability: development
+ NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait")
+ // closing
+ // Stability: development
+ NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing")
+ // established
+ // Stability: development
+ NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established")
+ // fin_wait_1
+ // Stability: development
+ NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1")
+ // fin_wait_2
+ // Stability: development
+ NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2")
+ // last_ack
+ // Stability: development
+ NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack")
+ // listen
+ // Stability: development
+ NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen")
+ // syn_received
+ // Stability: development
+ NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received")
+ // syn_sent
+ // Stability: development
+ NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent")
+ // time_wait
+ // Stability: development
+ NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait")
+)
+
+// Enum values for network.connection.subtype
+var (
+ // GPRS
+ // Stability: development
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ // Stability: development
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ // Stability: development
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ // Stability: development
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ // Stability: development
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ // Stability: development
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ // Stability: development
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ // Stability: development
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ // Stability: development
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ // Stability: development
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ // Stability: development
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ // Stability: development
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ // Stability: development
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ // Stability: development
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ // Stability: development
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ // Stability: development
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ // Stability: development
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ // Stability: development
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ // Stability: development
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ // Stability: development
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ // Stability: development
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+ // wifi
+ // Stability: development
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ // Stability: development
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ // Stability: development
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ // Stability: development
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ // Stability: development
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+ // transmit
+ // Stability: development
+ NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+ // receive
+ // Stability: development
+ NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+ // TCP
+ // Stability: stable
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ // Stability: stable
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe.
+ // Stability: stable
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ // Stability: stable
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+ // QUIC
+ // Stability: stable
+ NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+ // IPv4
+ // Stability: stable
+ NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ // Stability: stable
+ NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
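+
+// A minimal sketch for picking a "network.type" value from a parsed IP
+// address with the standard library's net/netip package; addr is an assumed
+// netip.Addr, and treating 4-mapped-in-6 addresses as ipv4 is a policy
+// choice, not mandated by the convention:
+//
+//	networkType := NetworkTypeIPv6
+//	if addr.Is4() || addr.Is4In6() {
+//		networkType = NetworkTypeIPv4
+//	}
+//	span.SetAttributes(networkType)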
+
+// Namespace: oci
+const (
+ // OCIManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of the
+ // OCI image manifest. For container images specifically, this is the digest by
+ // which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+ // Note: Follows [OCI Image Manifest Specification], and specifically the
+ // [Digest property].
+ // An example can be found in [Example Image Manifest].
+ //
+ // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+ // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+ // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+ OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+ return OCIManifestDigestKey.String(val)
+}
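+
+// The digest string follows the OCI descriptor format `<algorithm>:<hex>`. A
+// minimal sketch computing it from raw manifest bytes with crypto/sha256 and
+// fmt; manifest is an assumed []byte holding the manifest JSON:
+//
+//	sum := sha256.Sum256(manifest)
+//	kv := OCIManifestDigest(fmt.Sprintf("sha256:%x", sum))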
+
+// Namespace: opentracing
+const (
+ // OpenTracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the parent-child
+ // Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+ // The parent Span depends on the child Span in some capacity
+ // Stability: development
+ OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ // Stability: development
+ OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
+
+// Namespace: os
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to be
+ // parsed) OS version information, as reported by, for example, the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version" semantic
+ // conventions. It represents the version string of the operating system as
+ // defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, as reported by, for example, the `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Enum values for os.type
+var (
+ // Microsoft Windows
+ // Stability: development
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ // Stability: development
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ // Stability: development
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ // Stability: development
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ // Stability: development
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ // Stability: development
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ // Stability: development
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ // Stability: development
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ // Stability: development
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ // Stability: development
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ // Stability: development
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
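+
+// Most of these enum values coincide with Go's runtime.GOOS strings, so a
+// host-detection sketch only needs to special-case the ones that differ; the
+// fallback below is an assumption, since GOOS values not listed in the enum
+// have no defined mapping:
+//
+//	osType := OSTypeKey.String(runtime.GOOS) // linux, windows, darwin, *bsd, aix, solaris match
+//	if runtime.GOOS == "dragonfly" {
+//		osType = OSTypeDragonflyBSD // enum value is "dragonflybsd"
+//	}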
+
+// Namespace: otel
+const (
+ // OTelComponentNameKey is the attribute Key conforming to the
+ // "otel.component.name" semantic conventions. It represents a name uniquely
+ // identifying the instance of the OpenTelemetry component within its containing
+ // SDK instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otlp_grpc_span_exporter/0", "custom-name"
+ // Note: Implementations SHOULD ensure a low cardinality for this attribute,
+ // even across application or SDK restarts.
+ // E.g. implementations MUST NOT use UUIDs as values for this attribute.
+ //
+ // Implementations MAY achieve these goals by following a
+ // `<otel.component.type>/<instance-counter>` pattern, e.g.
+ // `batching_span_processor/0` (a sketch follows this const block).
+ // Hereby `otel.component.type` refers to the corresponding attribute value of
+ // the component.
+ //
+ // The value of `instance-counter` MAY be automatically assigned by the
+ // component and uniqueness within the enclosing SDK instance MUST be
+ // guaranteed.
+ // For example, `<instance-counter>` MAY be implemented by using a monotonically
+ // increasing counter (starting with `0`), which is incremented every time an
+ // instance of the given component type is started.
+ //
+ // With this implementation, for example the first Batching Span Processor would
+ // have `batching_span_processor/0`
+ // as `otel.component.name`, the second one `batching_span_processor/1` and so
+ // on.
+ // These values will therefore be reused in the case of an application restart.
+ OTelComponentNameKey = attribute.Key("otel.component.name")
+
+ // OTelComponentTypeKey is the attribute Key conforming to the
+ // "otel.component.type" semantic conventions. It represents a name identifying
+ // the type of the OpenTelemetry component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "batching_span_processor", "com.example.MySpanExporter"
+ // Note: If none of the standardized values apply, implementations SHOULD use
+ // the language-defined name of the type.
+ // E.g. for Java the fully qualified classname SHOULD be used in this case.
+ OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+ // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+ // semantic conventions. It represents the name of the instrumentation scope - (
+ // `InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "io.opentelemetry.contrib.mongodb"
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of the
+ // instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.0.0"
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+ // OTelSpanSamplingResultKey is the attribute Key conforming to the
+ // "otel.span.sampling_result" semantic conventions. It represents the result
+ // value of the sampler for this span.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+ // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+ // semantic conventions. It represents the name of the code, either "OK" or
+ // "ERROR". MUST NOT be set if the status code is UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the description
+ // of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "resource not found"
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
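+
+// A minimal sketch of the `<otel.component.type>/<instance-counter>` scheme
+// described in the "otel.component.name" note above, assuming one
+// package-level counter per component type (the names here are illustrative,
+// not part of this package):
+//
+//	var batchingSpanProcessorCount atomic.Int64 // from sync/atomic
+//
+//	func nextBatchingSpanProcessorName() attribute.KeyValue {
+//		n := batchingSpanProcessorCount.Add(1) - 1 // first instance gets 0
+//		return OTelComponentName(fmt.Sprintf("batching_span_processor/%d", n))
+//	}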
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
+func OTelComponentName(val string) attribute.KeyValue {
+ return OTelComponentNameKey.String(val)
+}
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the description
+// of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// Enum values for otel.component.type
+var (
+ // The builtin SDK batching span processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor")
+ // The builtin SDK simple span processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor")
+ // The builtin SDK batching log record processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor")
+ // The builtin SDK simple log record processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor")
+ // OTLP span exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter")
+ // OTLP span exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter")
+ // OTLP span exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter")
+ // OTLP log record exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter")
+ // OTLP log record exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter")
+ // OTLP log record exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter")
+ // The builtin SDK periodically exporting metric reader
+ //
+ // Stability: development
+ OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader")
+ // OTLP metric exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter")
+ // OTLP metric exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
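+
+// When bridging a span's status into these attributes (for example in an
+// exporter), a sketch using go.opentelemetry.io/otel/codes might look like
+// the following; status and attrs are assumed variables, and the UNSET case
+// is skipped per the MUST NOT on "otel.status_code" above:
+//
+//	switch status.Code {
+//	case codes.Ok:
+//		attrs = append(attrs, OTelStatusCodeOk)
+//	case codes.Error:
+//		attrs = append(attrs,
+//			OTelStatusCodeError,
+//			OTelStatusDescription(status.Description),
+//		)
+//	}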
+
+// Namespace: peer
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+ // conventions. It represents the [`service.name`] of the remote service. SHOULD
+ // be equal to the actual `service.name` resource attribute of the remote
+ // service if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "AuthTokenCache"
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux based systems, can be set to the zeroth
+ // string in `/proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received by
+ // the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otecol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+ // used to launch the process as a single string representing the full command.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otecol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It represents the
+ // specifies whether the context switches for this data point were voluntary or
+ // involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+ // build ID as retrieved by `go tool buildid <go executable>`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling specific build ID for executables. See the OTel specification for
+ // Profiles for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux based systems, can be set to the target
+ // of `/proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents the whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ //
+ // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html
+ ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type of
+ // page fault for this data point. Type `major` is for major/hard page faults,
+ // and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent Process
+ // identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic
+ // conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user ID
+ // (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the username of
+ // the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0"
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of the
+ // runtime of this process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OpenJDK Runtime Environment"
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the version of
+ // the runtime of this process, as returned by the runtime without modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.0.2"
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved user ID
+ // (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the username of
+ // the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID of
+ // the process's session leader. This is also the session ID (SID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessTitleKey is the attribute Key conforming to the "process.title"
+ // semantic conventions. It represents the process title (proctitle).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cat /etc/hostname", "xfce4-session", "bash"
+ // Note: In many Unix-like systems, the process title (proctitle) is the string
+ // that represents the name or command line of a running process, displayed by
+ // system monitoring tools like ps, top, and htop.
+ ProcessTitleKey = attribute.Key("process.title")
+
+ // ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+ // semantic conventions. It represents the effective user ID (EUID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+ // semantic conventions. It represents the username of the effective user of the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+ // conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host but it is unique within the process
+ // namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+
+ // ProcessWorkingDirectoryKey is the attribute Key conforming to the
+ // "process.working_directory" semantic conventions. It represents the working
+ // directory of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `/proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `/proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
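+
+// A minimal sketch populating both args attributes for the current process,
+// assuming any sanitization required by the SHOULD NOT guidance above has
+// already been applied to args:
+//
+//	args := os.Args
+//	span.SetAttributes(
+//		ProcessCommandArgs(args...),
+//		ProcessArgsCount(len(args)),
+//	)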
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
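+
+// The example timestamps are RFC 3339 (a profile of ISO 8601) in UTC, which
+// the standard library produces directly. A sketch, assuming start is the
+// process start time as a time.Time:
+//
+//	kv := ProcessCreationTime(start.UTC().Format(time.RFC3339Nano))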
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `/proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user ID
+// (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username of
+// the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessTitle returns an attribute KeyValue conforming to the "process.title"
+// semantic conventions. It represents the process title (proctitle).
+func ProcessTitle(val string) attribute.KeyValue {
+ return ProcessTitleKey.String(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid"
+// semantic conventions. It represents the virtual process identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
+
+// ProcessWorkingDirectory returns an attribute KeyValue conforming to the
+// "process.working_directory" semantic conventions. It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It represents the describes the
+ // interpreter or compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.
+ //
+ // Stability: development
+ //
+ // [C]: https://wikipedia.org/wiki/C_(programming_language)
+ // [C++]: https://wikipedia.org/wiki/C%2B%2B
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+ // [Perl]
+ //
+ // Stability: development
+ //
+ // [Perl]: https://wikipedia.org/wiki/Perl
+ ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+ // [PHP]
+ //
+ // Stability: development
+ //
+ // [PHP]: https://wikipedia.org/wiki/PHP
+ ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+ // [Python]
+ //
+ // Stability: development
+ //
+ // [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+ ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+ // [Ruby]
+ //
+ // Stability: development
+ //
+ // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+ ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+ // [V8JS]
+ //
+ // Stability: development
+ //
+ // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+ ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+ // [Erlang]
+ //
+ // Stability: development
+ //
+ // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+ ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+ // [Go]
+ //
+ // Stability: development
+ //
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+ // [Rust]
+ //
+ // Stability: development
+ //
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes] of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [error codes]: https://connectrpc.com/docs/protocol/#error-codes
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the
+ // [numeric status code] of the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+ // property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -32700, 100
+ RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of the request or response. Since the protocol allows the id to be
+ // an int, a string, `null`, or missing (for notifications), the value is
+ // expected to be cast to a string for simplicity. Use an empty string in case
+ // of a `null` value. Omit entirely if
+ // this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+	// semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called,
+	// which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function.name` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+ // conventions. It represents the full (logical) name of the service being
+ // called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myservice.EchoService
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+ // conventions. It represents a string identifying the remoting system. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+ return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+ return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since the protocol allows the id to be an int, a
+// string, `null`, or missing (for notifications), the value is expected to be
+// cast to a string for simplicity. Use an empty string in case of a `null`
+// value. Omit entirely if this is a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+ return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+ return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// Enum values for rpc.connect_rpc.error_code
+var (
+ // cancelled
+ // Stability: development
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ // Stability: development
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ // Stability: development
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ // Stability: development
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ // Stability: development
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ // Stability: development
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ // Stability: development
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ // Stability: development
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ // Stability: development
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ // Stability: development
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ // Stability: development
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ // Stability: development
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ // Stability: development
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ // Stability: development
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ // Stability: development
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ // Stability: development
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+// Enum values for rpc.grpc.status_code
+var (
+ // OK
+ // Stability: development
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ // Stability: development
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ // Stability: development
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ // Stability: development
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ // Stability: development
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ // Stability: development
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ // Stability: development
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ // Stability: development
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ // Stability: development
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ // Stability: development
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ // Stability: development
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ // Stability: development
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ // Stability: development
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ // Stability: development
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ // Stability: development
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ // Stability: development
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ // Stability: development
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Enum values for rpc.message.type
+var (
+ // sent
+ // Stability: development
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ // Stability: development
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+// Enum values for rpc.system
+var (
+ // gRPC
+ // Stability: development
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ // Stability: development
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ // Stability: development
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ // Stability: development
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ // Stability: development
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
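+
+// exampleRPCSpanAttrs is an illustrative sketch, not part of the generated
+// conventions: the attribute set a gRPC client instrumentation might attach
+// to a span, combining the helper functions and enum values above. The
+// service and method names are the documented examples, not real endpoints.
+func exampleRPCSpanAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		RPCSystemGRPC,                       // rpc.system
+		RPCService("myservice.EchoService"), // rpc.service
+		RPCMethod("exampleMethod"),          // rpc.method
+		RPCGRPCStatusCodeOk,                 // rpc.grpc.status_code
+	}
+}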
+
+// Namespace: security_rule
+const (
+ // SecurityRuleCategoryKey is the attribute Key conforming to the
+ // "security_rule.category" semantic conventions. It represents a categorization
+ // value keyword used by the entity using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Attempted Information Leak"
+ SecurityRuleCategoryKey = attribute.Key("security_rule.category")
+
+ // SecurityRuleDescriptionKey is the attribute Key conforming to the
+ // "security_rule.description" semantic conventions. It represents the
+ // description of the rule generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Block requests to public DNS over HTTPS / TLS protocols"
+ SecurityRuleDescriptionKey = attribute.Key("security_rule.description")
+
+ // SecurityRuleLicenseKey is the attribute Key conforming to the
+ // "security_rule.license" semantic conventions. It represents the name of the
+ // license under which the rule used to generate this event is made available.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apache 2.0"
+ SecurityRuleLicenseKey = attribute.Key("security_rule.license")
+
+ // SecurityRuleNameKey is the attribute Key conforming to the
+ // "security_rule.name" semantic conventions. It represents the name of the rule
+ // or signature generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BLOCK_DNS_over_TLS"
+ SecurityRuleNameKey = attribute.Key("security_rule.name")
+
+ // SecurityRuleReferenceKey is the attribute Key conforming to the
+ // "security_rule.reference" semantic conventions. It represents the reference
+ // URL to additional information about the rule used to generate this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS"
+ // Note: The URL can point to the vendor’s documentation about the rule. If
+ // that’s not available, it can also be a link to a more general page
+ // describing this type of alert.
+ SecurityRuleReferenceKey = attribute.Key("security_rule.reference")
+
+ // SecurityRuleRulesetNameKey is the attribute Key conforming to the
+ // "security_rule.ruleset.name" semantic conventions. It represents the name of
+ // the ruleset, policy, group, or parent category in which the rule used to
+ // generate this event is a member.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Standard_Protocol_Filters"
+ SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name")
+
+ // SecurityRuleUUIDKey is the attribute Key conforming to the
+ // "security_rule.uuid" semantic conventions. It represents a rule ID that is
+ // unique within the scope of a set or group of agents, observers, or other
+ // entities using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011"
+ SecurityRuleUUIDKey = attribute.Key("security_rule.uuid")
+
+ // SecurityRuleVersionKey is the attribute Key conforming to the
+ // "security_rule.version" semantic conventions. It represents the version /
+ // revision of the rule being used for analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.0.0"
+ SecurityRuleVersionKey = attribute.Key("security_rule.version")
+)
+
+// SecurityRuleCategory returns an attribute KeyValue conforming to the
+// "security_rule.category" semantic conventions. It represents a categorization
+// value keyword used by the entity using the rule for detection of this event.
+func SecurityRuleCategory(val string) attribute.KeyValue {
+ return SecurityRuleCategoryKey.String(val)
+}
+
+// SecurityRuleDescription returns an attribute KeyValue conforming to the
+// "security_rule.description" semantic conventions. It represents the
+// description of the rule generating the event.
+func SecurityRuleDescription(val string) attribute.KeyValue {
+ return SecurityRuleDescriptionKey.String(val)
+}
+
+// SecurityRuleLicense returns an attribute KeyValue conforming to the
+// "security_rule.license" semantic conventions. It represents the name of the
+// license under which the rule used to generate this event is made available.
+func SecurityRuleLicense(val string) attribute.KeyValue {
+ return SecurityRuleLicenseKey.String(val)
+}
+
+// SecurityRuleName returns an attribute KeyValue conforming to the
+// "security_rule.name" semantic conventions. It represents the name of the rule
+// or signature generating the event.
+func SecurityRuleName(val string) attribute.KeyValue {
+ return SecurityRuleNameKey.String(val)
+}
+
+// SecurityRuleReference returns an attribute KeyValue conforming to the
+// "security_rule.reference" semantic conventions. It represents the reference
+// URL to additional information about the rule used to generate this event.
+func SecurityRuleReference(val string) attribute.KeyValue {
+ return SecurityRuleReferenceKey.String(val)
+}
+
+// SecurityRuleRulesetName returns an attribute KeyValue conforming to the
+// "security_rule.ruleset.name" semantic conventions. It represents the name of
+// the ruleset, policy, group, or parent category in which the rule used to
+// generate this event is a member.
+func SecurityRuleRulesetName(val string) attribute.KeyValue {
+ return SecurityRuleRulesetNameKey.String(val)
+}
+
+// SecurityRuleUUID returns an attribute KeyValue conforming to the
+// "security_rule.uuid" semantic conventions. It represents a rule ID that is
+// unique within the scope of a set or group of agents, observers, or other
+// entities using the rule for detection of this event.
+func SecurityRuleUUID(val string) attribute.KeyValue {
+ return SecurityRuleUUIDKey.String(val)
+}
+
+// SecurityRuleVersion returns an attribute KeyValue conforming to the
+// "security_rule.version" semantic conventions. It represents the version /
+// revision of the rule being used for analysis.
+func SecurityRuleVersion(val string) attribute.KeyValue {
+ return SecurityRuleVersionKey.String(val)
+}
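+
+// exampleSecurityRuleAttrs is an illustrative sketch, not part of the
+// generated conventions: attributes describing the rule behind a security
+// event, using the example values documented above.
+func exampleSecurityRuleAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SecurityRuleName("BLOCK_DNS_over_TLS"),
+		SecurityRuleCategory("Attempted Information Leak"),
+		SecurityRuleRulesetName("Standard_Protocol_Filters"),
+		SecurityRuleVersion("1.0.0"),
+	}
+}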
+
+// Namespace: server
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.address` SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port" semantic
+ // conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.port` SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the "server.address"
+// semantic conventions. It represents the server domain name if available
+// without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
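+
+// exampleServerAttrs is an illustrative sketch, not part of the generated
+// conventions: describing the logical server endpoint as seen from a client.
+// Per the notes above, these should name the server behind any proxies.
+func exampleServerAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ServerAddress("example.com"),
+		ServerPort(443),
+	}
+}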
+
+// Namespace: service
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID of
+ // the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "627cc493-f310-47de-96bd-71410b7dec09"
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+	// unique). The ID helps to distinguish instances of the same service that
+	// exist at the same time (e.g. instances of a horizontally scaled service).
+ //
+	// Implementations, such as SDKs, are recommended to generate a random
+	// Version 1 or Version 4 [RFC 4122] UUID, but are free to use an inherent
+	// unique ID as the source of this value if stability is desirable. In that
+	// case, the ID SHOULD be used as the source of a UUID Version 5 and SHOULD
+	// use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+	// UUIDs are typically recommended, as only an opaque value for the purposes
+	// of identifying a service instance is needed. Similar to what can be seen
+	// in the man page for the [`/etc/machine-id`] file, the underlying data,
+	// such as pod name and namespace, should be treated as confidential; it is
+	// the user's choice whether to expose it via another resource attribute.
+ //
+	// For applications running behind an application server (like unicorn), we
+	// do not recommend using one identifier for all processes participating in
+	// the application. Instead, it's recommended that each division (e.g. a
+	// worker thread in unicorn) have its own instance.id.
+ //
+	// It's not recommended for a Collector to set `service.instance.id` if it
+	// can't unambiguously determine the service instance that is generating that
+	// telemetry. For instance, creating a UUID based on `pod.name` will likely
+	// be wrong, as the Collector might not know from which container within that
+	// pod the telemetry originated. However, Collectors can set the
+	// `service.instance.id` if they can unambiguously determine the service
+	// instance for that telemetry. This is typically the case for scraping
+	// receivers, as they know the target address and port.
+ //
+	// [RFC 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fallback to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Shop"
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace
+ // defined (so the empty/unspecified namespace is simply one more valid
+ // namespace). Zero-length namespace string is assumed equal to unspecified
+ // namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the "service.version"
+ // semantic conventions. It represents the version string of the service API or
+ // implementation. The format is not defined by these conventions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "2.0.0", "a01dbef8a"
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of the
+// service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the "service.name"
+// semantic conventions. It represents the logical name of the service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
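+
+// exampleServiceResourceAttrs is an illustrative sketch, not part of the
+// generated conventions: the service identity triplet described above. The
+// UUID literal stands in for a randomly generated Version 4 UUID.
+func exampleServiceResourceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ServiceNamespace("Shop"),
+		ServiceName("shoppingcart"),
+		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+		ServiceVersion("2.0.0"),
+	}
+}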
+
+// Namespace: session
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id" semantic
+ // conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
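+
+// exampleSessionRotationAttrs is an illustrative sketch, not part of the
+// generated conventions: linking a new session to the one it replaces.
+func exampleSessionRotationAttrs(previousID, newID string) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SessionID(newID),
+		SessionPreviousID(previousID),
+	}
+}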
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the signalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
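+
+// exampleSignalRCloseAttr is an illustrative sketch, not part of the
+// generated conventions: selecting the connection-closure enum value from a
+// hypothetical application-shutdown flag.
+func exampleSignalRCloseAttr(appShuttingDown bool) attribute.KeyValue {
+	if appShuttingDown {
+		return SignalRConnectionStatusAppShutdown
+	}
+	return SignalRConnectionStatusNormalClosure
+}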
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // deprecated, use `cpu.logical_number` instead.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "(identifier)"
+ SystemDeviceKey = attribute.Key("system.device")
+
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the filesystem
+ // mode.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "rw, ro"
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/mnt/data"
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the filesystem
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "used"
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the filesystem
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ext4"
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free", "cached"
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "in"
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory paging
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free"
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory paging
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "minor"
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State Codes].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "running"
+ //
+ // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the
+// deprecated, use `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+ // used
+ // Stability: development
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ // Stability: development
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // Deprecated: Removed, report shared memory usage with
+ // `metric.system.memory.shared` metric.
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ // Stability: development
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ // Stability: development
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Enum values for system.paging.direction
+var (
+ // in
+ // Stability: development
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ // Stability: development
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+// Enum values for system.paging.state
+var (
+ // used
+ // Stability: development
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ // Stability: development
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+// Enum values for system.paging.type
+var (
+ // major
+ // Stability: development
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ // Stability: development
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Enum values for system.process.status
+var (
+ // running
+ // Stability: development
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ // Stability: development
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ // Stability: development
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ // Stability: development
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
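+
+// exampleFilesystemAttrs is an illustrative sketch, not part of the generated
+// conventions: the attribute set a filesystem-usage measurement might carry,
+// combining the helpers and enum values above. The device path is a
+// hypothetical example.
+func exampleFilesystemAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SystemDevice("/dev/sda1"), // hypothetical device identifier
+		SystemFilesystemMountpoint("/mnt/data"),
+		SystemFilesystemTypeExt4,
+		SystemFilesystemStateUsed,
+		SystemFilesystemMode("rw"),
+	}
+}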
+
+// Namespace: telemetry
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of the
+ // auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "parts-unlimited-java"
+ // Note: Official auto instrumentation agents and distributions SHOULD set the
+ // `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the language of
+ // the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "opentelemetry"
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to
+ // `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is used,
+ // this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module name of
+ // this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.2.3"
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version string
+// of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// Enum values for telemetry.sdk.language
+var (
+ // cpp
+ // Stability: stable
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ // Stability: stable
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ // Stability: stable
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ // Stability: stable
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ // Stability: stable
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ // Stability: stable
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ // Stability: stable
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ // Stability: stable
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ // Stability: stable
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ // Stability: stable
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ // Stability: stable
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ // Stability: stable
+ TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs")
+)
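+
+// exampleTelemetrySDKAttrs is an illustrative sketch, not part of the
+// generated conventions: the SDK identity attributes a Go OpenTelemetry SDK
+// would report, per the notes above. The version is the documented example.
+func exampleTelemetrySDKAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TelemetrySDKName("opentelemetry"),
+		TelemetrySDKLanguageGo,
+		TelemetrySDKVersion("1.2.3"),
+	}
+}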
+
+// Namespace: test
+const (
+ // TestCaseNameKey is the attribute Key conforming to the "test.case.name"
+ // semantic conventions. It represents the fully qualified human readable name
+ // of the [test case].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1",
+ // "ExampleTestCase1_test1"
+ //
+ // [test case]: https://wikipedia.org/wiki/Test_case
+ TestCaseNameKey = attribute.Key("test.case.name")
+
+ // TestCaseResultStatusKey is the attribute Key conforming to the
+ // "test.case.result.status" semantic conventions. It represents the status of
+ // the actual test case result from test execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pass", "fail"
+ TestCaseResultStatusKey = attribute.Key("test.case.result.status")
+
+ // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name"
+	// semantic conventions. It represents the human readable name of a
+	// [test suite].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TestSuite1"
+ //
+ // [test suite]: https://wikipedia.org/wiki/Test_suite
+ TestSuiteNameKey = attribute.Key("test.suite.name")
+
+ // TestSuiteRunStatusKey is the attribute Key conforming to the
+ // "test.suite.run.status" semantic conventions. It represents the status of the
+ // test suite run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "skipped", "aborted", "timed_out",
+ // "in_progress"
+ TestSuiteRunStatusKey = attribute.Key("test.suite.run.status")
+)
+
+// TestCaseName returns an attribute KeyValue conforming to the "test.case.name"
+// semantic conventions. It represents the fully qualified human readable name of
+// the [test case].
+//
+// [test case]: https://wikipedia.org/wiki/Test_case
+func TestCaseName(val string) attribute.KeyValue {
+ return TestCaseNameKey.String(val)
+}
+
+// TestSuiteName returns an attribute KeyValue conforming to the
+// "test.suite.name" semantic conventions. It represents the human readable name
+// of a [test suite].
+//
+// [test suite]: https://wikipedia.org/wiki/Test_suite
+func TestSuiteName(val string) attribute.KeyValue {
+ return TestSuiteNameKey.String(val)
+}
+
+// Enum values for test.case.result.status
+var (
+ // pass
+ // Stability: development
+ TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass")
+ // fail
+ // Stability: development
+ TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail")
+)
+
+// Enum values for test.suite.run.status
+var (
+ // success
+ // Stability: development
+ TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success")
+ // failure
+ // Stability: development
+ TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure")
+ // skipped
+ // Stability: development
+ TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped")
+ // aborted
+ // Stability: development
+ TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted")
+ // timed_out
+ // Stability: development
+ TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out")
+ // in_progress
+ // Stability: development
+ TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress")
+)
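+
+// exampleTestCaseAttrs is an illustrative sketch, not part of the generated
+// conventions: mapping a boolean test outcome onto the result-status enum
+// values above.
+func exampleTestCaseAttrs(name string, passed bool) []attribute.KeyValue {
+	status := TestCaseResultStatusFail
+	if passed {
+		status = TestCaseResultStatusPass
+	}
+	return []attribute.KeyValue{TestCaseName(name), status}
+}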
+
+// Namespace: thread
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed to OS
+ // thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+ // conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: main
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Namespace: tls
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+ // conventions. It represents the string indicating the [cipher] used during the
+ // current connection.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+ // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+	// of the [registered TLS Cipher Suites].
+	//
+	// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+	// [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the client. This is usually
+ // mutually-exclusive of `client.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+	// client. This is usually mutually exclusive with `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the certificate
+	// fingerprint using the MD5 digest of the DER-encoded version of the
+	// certificate offered by the client. For consistency with other hash values,
+	// this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the certificate
+	// fingerprint using the SHA1 digest of the DER-encoded version of the
+	// certificate offered by the client. For consistency with other hash values,
+	// this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the certificate
+	// fingerprint using the SHA256 digest of the DER-encoded version of the
+	// certificate offered by the client. For consistency with other hash values,
+	// this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the date/Time
+ // indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com"
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the array
+ // of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the given
+ // cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "secp256r1"
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the "tls.established"
+ // semantic conventions. It represents the boolean flag indicating if the TLS
+ // negotiation was successful and transitioned to an encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol"
+ // semantic conventions. It represents the string indicating the protocol being
+ // tunneled. Per the values in the [IANA registry], this string should be lower
+ // case.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "http/1.1"
+ //
+ // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name"
+ // semantic conventions. It represents the normalized lowercase protocol name
+	// parsed from the original string of the negotiated [SSL/TLS protocol version].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric part
+ // of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol version].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2", "3"
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic
+ // conventions. It represents the boolean flag indicating if this TLS connection
+ // was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+	// stand-alone certificate offered by the server. This is usually mutually
+	// exclusive with `server.certificate_chain`, since this value also exists in
+	// that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+	// server. This is usually mutually exclusive with `server.certificate`, since
+	// that value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the certificate
+	// fingerprint using the MD5 digest of the DER-encoded version of the
+	// certificate offered by the server. For consistency with other hash values,
+	// this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the certificate
+	// fingerprint using the SHA1 digest of the DER-encoded version of the
+	// certificate offered by the server. For consistency with other hash values,
+	// this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the certificate
+	// fingerprint using the SHA256 digest of the DER-encoded version of the
+	// certificate offered by the server. For consistency with other hash values,
+	// this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+	// semantic conventions. It represents the distinguished name of the [subject]
+	// of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+ // semantic conventions. It represents a hash that identifies servers based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the date/Time
+ // indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com"
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the [cipher] used
+// during the current connection.
+//
+// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually mutually
+// exclusive with `client.certificate_chain`, since this value also exists in
+// that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// client. This is usually mutually exclusive with `client.certificate`, since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
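+
+// Example (illustrative, not part of the generated API): building the PEM
+// strings expected by TLSClientCertificateChain from the peer certificates of
+// a crypto/tls connection. Assumes "crypto/tls" and "encoding/pem" are
+// imported and "state" is a tls.ConnectionState from the accepted connection:
+//
+//	chain := make([]string, 0, len(state.PeerCertificates))
+//	for _, cert := range state.PeerCertificates {
+//		block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
+//		chain = append(chain, string(pem.EncodeToMemory(&block)))
+//	}
+//	attr := TLSClientCertificateChain(chain...)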
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
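+
+// Example (illustrative): computing the uppercase SHA-256 fingerprint that
+// TLSClientHashSha256 expects. Assumes "crypto/sha256", "encoding/hex" and
+// "strings" are imported and "cert" is an *x509.Certificate:
+//
+//	sum := sha256.Sum256(cert.Raw)
+//	attr := TLSClientHashSha256(strings.ToUpper(hex.EncodeToString(sum[:])))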
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished name
+// of the [subject] of the issuer of the x.509 certificate presented by the
+// client.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3"
+// semantic conventions. It represents a hash that identifies clients based on
+// how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic
+// conventions. It represents the string indicating the curve used for the given
+// cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string indicating
+// the protocol being tunneled. Per the values in the [IANA registry], this
+// string should be lower case.
+//
+// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually mutually
+// exclusive with `server.certificate_chain`, since this value also exists in
+// that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually exclusive with `server.certificate`, since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of the [subject] of the issuer of the x.509 certificate presented by the
+// server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Enum values for tls.protocol.name
+var (
+ // ssl
+ // Stability: development
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ // Stability: development
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
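+
+// Example (illustrative): deriving several tls.* attributes from a completed
+// handshake. Assumes "crypto/tls" is imported and "cs" is a
+// tls.ConnectionState:
+//
+//	attrs := []attribute.KeyValue{
+//		TLSProtocolNameTLS,
+//		TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
+//		TLSNextProtocol(cs.NegotiatedProtocol),
+//		TLSResumed(cs.DidResume),
+//		TLSEstablished(true),
+//	}
+//	if cs.Version == tls.VersionTLS13 {
+//		attrs = append(attrs, TLSProtocolVersion("1.3"))
+//	}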
+
+// Namespace: url
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain" semantic
+ // conventions. It represents the domain extracted from the `url.full`, such as
+ // "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2",
+ // "[1080:0:0:0:8:800:200C:417A]"
+ // Note: In some cases a URL may refer to an IP and/or port directly, without a
+ // domain name. In this case, the IP address would go to the domain field. If
+ // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[`
+ // and `]` characters should also be captured in the domain field.
+ //
+ // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from the
+ // `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+	// Note: The file extension is only set if it exists, as not every URL has a
+	// file extension. When the file name has multiple extensions, such as
+	// `example.tar.gz`, only the last one should be captured (`gz`, not `tar.gz`).
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic
+ // conventions. It represents the [URI fragment] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SemConv"
+ //
+ // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network resource
+ // according to [RFC3986].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost"
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the fragment
+ // is not transmitted over HTTP, but if it is known, it SHOULD be included
+ // nevertheless.
+ //
+	// `url.full` MUST NOT contain credentials passed via URL in the form of
+	// `https://username:password@www.example.com/`.
+	// In such a case, username and password SHOULD be redacted and the attribute's
+	// value SHOULD be `https://REDACTED:REDACTED@www.example.com/`.
+ //
+ // `url.full` SHOULD capture the absolute URL when it is available (or can be
+ // reconstructed).
+ //
+ // Sensitive content provided in `url.full` SHOULD be scrubbed when
+ // instrumentations can identify it.
+	//
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the
+ // value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `https://www.example.com/path?color=blue&sig=REDACTED`.
+ //
+ // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original" semantic
+ // conventions. It represents the unmodified original URL as seen in the event
+ // source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv",
+ // "search?q=OpenTelemetry"
+ // Note: In network monitoring, the observed URL may be a full URL, whereas in
+ // access logs, the URL is often just represented as a path. This field is meant
+ // to represent the URL as it was observed, complete or not.
+	// `url.original` might contain credentials passed via URL in the form of
+	// `https://username:password@www.example.com/`. In such a case, password and
+	// username SHOULD NOT be redacted and the attribute's value SHOULD remain the
+	// same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI path] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/search"
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI query] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "q=OpenTelemetry"
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+	//
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `q=OpenTelemetry&sig=REDACTED`.
+ //
+ // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.com", "foo.co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ // For example, the registered domain for `foo.example.com` is `example.com`.
+ // Trying to approximate this by simply taking the last two labels will not work
+ // well for TLDs such as `co.uk`.
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+ // conventions. It represents the [URI scheme] component identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https", "ftp", "telnet"
+ //
+ // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+	// semantic conventions. It represents the subdomain portion of a fully
+	// qualified domain name, which includes all of the names except the host name
+	// under the registered_domain. In a partially qualified domain, or if the
+	// qualification level of the full name cannot be determined, subdomain
+	// contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "east", "sub2.sub1"
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+ // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+ // subdomain field should contain `sub2.sub1`, with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+ // conventions. It represents the low-cardinality template of an
+ // [absolute path reference].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+ //
+ // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective top
+ // level domain (eTLD), also known as the domain suffix, is the last part of the
+ // domain name. For example, the top level domain for example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com", "co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
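+
+// Example (illustrative): redacting credentials before recording url.full, as
+// required by the note on URLFullKey. Assumes "net/url" is imported and "raw"
+// is the URL string observed by the instrumentation:
+//
+//	if u, err := url.Parse(raw); err == nil {
+//		if u.User != nil {
+//			u.User = url.UserPassword("REDACTED", "REDACTED")
+//		}
+//		attr := URLFull(u.String())
+//		_ = attr
+//	}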
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name, which includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, is the last part of the
+// domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
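+
+// Example (illustrative): decomposing a parsed URL into individual url.*
+// attributes. Assumes "net/url" and "strconv" are imported and "u" is a
+// *url.URL:
+//
+//	attrs := []attribute.KeyValue{
+//		URLScheme(u.Scheme),
+//		URLDomain(u.Hostname()),
+//		URLPath(u.Path),
+//	}
+//	if u.RawQuery != "" {
+//		attrs = append(attrs, URLQuery(u.RawQuery))
+//	}
+//	if port, err := strconv.Atoi(u.Port()); err == nil {
+//		attrs = append(attrs, URLPort(port))
+//	}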
+
+// Namespace: user
+const (
+ // UserEmailKey is the attribute Key conforming to the "user.email" semantic
+ // conventions. It represents the user email address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein@example.com"
+ UserEmailKey = attribute.Key("user.email")
+
+ // UserFullNameKey is the attribute Key conforming to the "user.full_name"
+ // semantic conventions. It represents the user's full name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Albert Einstein"
+ UserFullNameKey = attribute.Key("user.full_name")
+
+ // UserHashKey is the attribute Key conforming to the "user.hash" semantic
+ // conventions. It represents the unique user hash to correlate information for
+ // a user in anonymized form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+ // Note: Useful if `user.id` or `user.name` contain confidential information and
+ // cannot be used.
+ UserHashKey = attribute.Key("user.hash")
+
+ // UserIDKey is the attribute Key conforming to the "user.id" semantic
+ // conventions. It represents the unique identifier of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000"
+ UserIDKey = attribute.Key("user.id")
+
+ // UserNameKey is the attribute Key conforming to the "user.name" semantic
+ // conventions. It represents the short name or login/username of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein"
+ UserNameKey = attribute.Key("user.name")
+
+ // UserRolesKey is the attribute Key conforming to the "user.roles" semantic
+ // conventions. It represents the array of user roles at the time of the event.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "admin", "reporting_user"
+ UserRolesKey = attribute.Key("user.roles")
+)
+
+// UserEmail returns an attribute KeyValue conforming to the "user.email"
+// semantic conventions. It represents the user email address.
+func UserEmail(val string) attribute.KeyValue {
+ return UserEmailKey.String(val)
+}
+
+// UserFullName returns an attribute KeyValue conforming to the "user.full_name"
+// semantic conventions. It represents the user's full name.
+func UserFullName(val string) attribute.KeyValue {
+ return UserFullNameKey.String(val)
+}
+
+// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic
+// conventions. It represents the unique user hash to correlate information for a
+// user in anonymized form.
+func UserHash(val string) attribute.KeyValue {
+ return UserHashKey.String(val)
+}
+
+// UserID returns an attribute KeyValue conforming to the "user.id" semantic
+// conventions. It represents the unique identifier of the user.
+func UserID(val string) attribute.KeyValue {
+ return UserIDKey.String(val)
+}
+
+// UserName returns an attribute KeyValue conforming to the "user.name" semantic
+// conventions. It represents the short name or login/username of the user.
+func UserName(val string) attribute.KeyValue {
+ return UserNameKey.String(val)
+}
+
+// UserRoles returns an attribute KeyValue conforming to the "user.roles"
+// semantic conventions. It represents the array of user roles at the time of the
+// event.
+func UserRoles(val ...string) attribute.KeyValue {
+ return UserRolesKey.StringSlice(val)
+}
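+
+// Example (illustrative): recording an anonymized user, using user.hash in
+// place of a confidential user.id. The SHA-256-based hash here is an
+// assumption of this sketch, not a requirement of the convention. Assumes
+// "crypto/sha256" and "encoding/hex" are imported and "userID" holds the raw
+// identifier:
+//
+//	sum := sha256.Sum256([]byte(userID))
+//	attrs := []attribute.KeyValue{
+//		UserHash(hex.EncodeToString(sum[:])),
+//		UserRoles("admin", "reporting_user"),
+//	}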
+
+// Namespace: user_agent
+const (
+ // UserAgentNameKey is the attribute Key conforming to the "user_agent.name"
+ // semantic conventions. It represents the name of the user-agent extracted from
+ // original. Usually refers to the browser's name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Safari", "YourApp"
+	// Note: [Example] of extracting the browser's name from the original string.
+	// In the case of using a user-agent for non-browser products, such as
+	// microservices with multiple names/versions inside the `user_agent.original`,
+	// the most significant name SHOULD be selected. In such a scenario it should
+	// align with `user_agent.version`.
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of the
+ // [HTTP User-Agent] header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2"
+ //
+ // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentOSNameKey is the attribute Key conforming to the
+ // "user_agent.os.name" semantic conventions. It represents the human readable
+ // operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ // Note: For mapping user agent strings to OS names, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+ // UserAgentOSVersionKey is the attribute Key conforming to the
+ // "user_agent.os.version" semantic conventions. It represents the version
+ // string of the operating system as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ // Note: For mapping user agent strings to OS versions, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+ // UserAgentSyntheticTypeKey is the attribute Key conforming to the
+ // "user_agent.synthetic.type" semantic conventions. It represents the specifies
+ // the category of synthetic traffic, such as tests or bots.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This attribute MAY be derived from the contents of the
+ // `user_agent.original` attribute. Components that populate the attribute are
+ // responsible for determining what they consider to be synthetic bot or test
+ // traffic. This attribute can either be set for self-identification purposes,
+ // or on telemetry detected to be generated as a result of a synthetic request.
+ // This attribute is useful for distinguishing between genuine client traffic
+ // and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of the
+ // user-agent extracted from original. Usually refers to the browser's version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.1.2", "1.0.0"
+	// Note: [Example] of extracting the browser's version from the original
+	// string. In the case of using a user-agent for non-browser products, such as
+	// microservices with multiple names/versions inside the `user_agent.original`,
+	// the most significant version SHOULD be selected. In such a scenario it
+	// should align with `user_agent.name`.
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP User-Agent] header sent by the client.
+//
+// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentOSName returns an attribute KeyValue conforming to the
+// "user_agent.os.name" semantic conventions. It represents the human readable
+// operating system name.
+func UserAgentOSName(val string) attribute.KeyValue {
+ return UserAgentOSNameKey.String(val)
+}
+
+// UserAgentOSVersion returns an attribute KeyValue conforming to the
+// "user_agent.os.version" semantic conventions. It represents the version string
+// of the operating system as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func UserAgentOSVersion(val string) attribute.KeyValue {
+ return UserAgentOSVersionKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
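+
+// Example (illustrative): capturing user_agent.original from an incoming HTTP
+// request. Assumes "net/http" is imported, "r" is an *http.Request, and
+// "span" is an active trace.Span:
+//
+//	if ua := r.UserAgent(); ua != "" {
+//		span.SetAttributes(UserAgentOriginal(ua))
+//	}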
+
+// Enum values for user_agent.synthetic.type
+var (
+ // Bot source.
+ // Stability: development
+ UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot")
+ // Synthetic test source.
+ // Stability: development
+ UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test")
+)
+
+// Namespace: vcs
+const (
+ // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id"
+ // semantic conventions. It represents the ID of the change (pull request/merge
+ // request/changelist) if applicable. This is usually a unique (within
+ // repository) identifier generated by the VCS system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ VCSChangeIDKey = attribute.Key("vcs.change.id")
+
+ // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state"
+ // semantic conventions. It represents the state of the change (pull
+ // request/merge request/changelist).
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "open", "closed", "merged"
+ VCSChangeStateKey = attribute.Key("vcs.change.state")
+
+ // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title"
+ // semantic conventions. It represents the human readable title of the change
+ // (pull request/merge request/changelist). This title is often a brief summary
+ // of the change and may get merged in to a ref as the commit summary.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update
+ // dependency"
+ VCSChangeTitleKey = attribute.Key("vcs.change.title")
+
+ // VCSLineChangeTypeKey is the attribute Key conforming to the
+ // "vcs.line_change.type" semantic conventions. It represents the type of line
+ // change being measured on a branch or change.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "added", "removed"
+ VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+ // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+ // semantic conventions. It represents the group owner within the version
+ // control system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org", "myteam", "business-unit"
+ VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+ // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+ // semantic conventions. It represents the name of the version control system
+ // provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "github", "gitlab", "gitea", "bitbucket"
+ VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+ // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+ // VCSRefBaseRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.base.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits. The revision can
+	// be a full [hash value (see glossary)] of the recorded change to a ref
+	// within a repository pointing to a [commit] object. It does not necessarily
+	// have to be a hash; it can simply define a [revision number], which is an
+	// integer that is monotonically increasing. In cases where it is identical to
+	// the `ref.base.name`, it SHOULD still be included. It is up to the
+	// implementer to decide which value to set as the revision based on the VCS
+	// system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+ // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+ // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+ // VCSRefHeadRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.head.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `head` refers to where you are right now; the current reference at a
+	// given time. The revision can be a full [hash value (see glossary)] of the
+	// recorded change to a ref within a repository pointing to a [commit] object.
+	// It does not necessarily have to be a hash; it can simply define a
+	// [revision number], which is an integer that is monotonically increasing. In
+	// cases where it is identical to the `ref.head.name`, it SHOULD still be
+	// included. It is up to the implementer to decide which value to set as the
+	// revision based on the VCS system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+ // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+ // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+ // conventions. It represents the type of the [reference] in the repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefTypeKey = attribute.Key("vcs.ref.type")
+
+ // VCSRepositoryNameKey is the attribute Key conforming to the
+ // "vcs.repository.name" semantic conventions. It represents the human readable
+ // name of the repository. It SHOULD NOT include any additional identifier like
+ // Group/SubGroup in GitLab or organization in GitHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "semantic-conventions", "my-cool-repo"
+	// Note: Because this is only the name, it can clash with forks of the same
+	// repository when telemetry is collected across multiple orgs or groups in
+	// the same backends.
+ VCSRepositoryNameKey = attribute.Key("vcs.repository.name")
+
+ // VCSRepositoryURLFullKey is the attribute Key conforming to the
+ // "vcs.repository.url.full" semantic conventions. It represents the
+ // [canonical URL] of the repository providing the complete HTTP(S) address in
+ // order to locate and identify the repository through a browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/opentelemetry/open-telemetry-collector-contrib",
+ // "https://gitlab.com/my-org/my-project/my-projects-project/repo"
+ // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include
+ // the `.git` extension.
+ //
+ // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+ VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full")
+
+ // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the
+ // "vcs.revision_delta.direction" semantic conventions. It represents the type
+ // of revision comparison.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ahead", "behind"
+ VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction")
+)
+
+// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id"
+// semantic conventions. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func VCSChangeID(val string) attribute.KeyValue {
+ return VCSChangeIDKey.String(val)
+}
+
+// VCSChangeTitle returns an attribute KeyValue conforming to the
+// "vcs.change.title" semantic conventions. It represents the human readable
+// title of the change (pull request/merge request/changelist). This title is
+// often a brief summary of the change and may get merged in to a ref as the
+// commit summary.
+func VCSChangeTitle(val string) attribute.KeyValue {
+ return VCSChangeTitleKey.String(val)
+}
+
+// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name"
+// semantic conventions. It represents the group owner within the version control
+// system.
+func VCSOwnerName(val string) attribute.KeyValue {
+ return VCSOwnerNameKey.String(val)
+}
+
+// VCSRefBaseName returns an attribute KeyValue conforming to the
+// "vcs.ref.base.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefBaseName(val string) attribute.KeyValue {
+ return VCSRefBaseNameKey.String(val)
+}
+
+// VCSRefBaseRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefBaseRevision(val string) attribute.KeyValue {
+ return VCSRefBaseRevisionKey.String(val)
+}
+
+// VCSRefHeadName returns an attribute KeyValue conforming to the
+// "vcs.ref.head.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefHeadName(val string) attribute.KeyValue {
+ return VCSRefHeadNameKey.String(val)
+}
+
+// VCSRefHeadRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefHeadRevision(val string) attribute.KeyValue {
+ return VCSRefHeadRevisionKey.String(val)
+}
+
+// VCSRepositoryName returns an attribute KeyValue conforming to the
+// "vcs.repository.name" semantic conventions. It represents the human readable
+// name of the repository. It SHOULD NOT include any additional identifier like
+// Group/SubGroup in GitLab or organization in GitHub.
+func VCSRepositoryName(val string) attribute.KeyValue {
+ return VCSRepositoryNameKey.String(val)
+}
+
+// VCSRepositoryURLFull returns an attribute KeyValue conforming to the
+// "vcs.repository.url.full" semantic conventions. It represents the
+// [canonical URL] of the repository providing the complete HTTP(S) address in
+// order to locate and identify the repository through a browser.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func VCSRepositoryURLFull(val string) attribute.KeyValue {
+ return VCSRepositoryURLFullKey.String(val)
+}
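+
+// Example (illustrative): annotating CI telemetry for a pull request with
+// vcs.* attributes; the values shown are hypothetical:
+//
+//	attrs := []attribute.KeyValue{
+//		VCSProviderNameGithub,
+//		VCSRepositoryName("semantic-conventions"),
+//		VCSRefHeadName("my-feature-branch"),
+//		VCSRefHeadTypeBranch,
+//		VCSChangeID("123"),
+//		VCSChangeStateOpen,
+//	}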
+
+// Enum values for vcs.change.state
+var (
+ // Open means the change is currently active and under review. It hasn't been
+ // merged into the target branch yet, and it's still possible to make changes or
+ // add comments.
+ // Stability: development
+ VCSChangeStateOpen = VCSChangeStateKey.String("open")
+ // WIP (work-in-progress, draft) means the change is still in progress and not
+ // yet ready for a full review. It might still undergo significant changes.
+ // Stability: development
+ VCSChangeStateWip = VCSChangeStateKey.String("wip")
+ // Closed means the merge request has been closed without merging. This can
+ // happen for various reasons, such as the changes being deemed unnecessary, the
+ // issue being resolved in another way, or the author deciding to withdraw the
+ // request.
+ // Stability: development
+ VCSChangeStateClosed = VCSChangeStateKey.String("closed")
+ // Merged indicates that the change has been successfully integrated into the
+ // target codebase.
+ // Stability: development
+ VCSChangeStateMerged = VCSChangeStateKey.String("merged")
+)
+
+// Enum values for vcs.line_change.type
+var (
+ // How many lines were added.
+ // Stability: development
+ VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added")
+ // How many lines were removed.
+ // Stability: development
+ VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed")
+)
+
+// Enum values for vcs.provider.name
+var (
+ // [GitHub]
+ // Stability: development
+ //
+ // [GitHub]: https://github.com
+ VCSProviderNameGithub = VCSProviderNameKey.String("github")
+ // [GitLab]
+ // Stability: development
+ //
+ // [GitLab]: https://gitlab.com
+ VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab")
+ // Deprecated: Replaced by `gitea`.
+ VCSProviderNameGittea = VCSProviderNameKey.String("gittea")
+ // [Gitea]
+ // Stability: development
+ //
+ // [Gitea]: https://gitea.io
+ VCSProviderNameGitea = VCSProviderNameKey.String("gitea")
+ // [Bitbucket]
+ // Stability: development
+ //
+ // [Bitbucket]: https://bitbucket.org
+ VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket")
+)
+
+// Enum values for vcs.ref.base.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.head.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefTypeBranch = VCSRefTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefTypeTag = VCSRefTypeKey.String("tag")
+)
+
+// Enum values for vcs.revision_delta.direction
+var (
+ // How many revisions the change is behind the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind")
+ // How many revisions the change is ahead of the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead")
+)
+
+// Namespace: webengine
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the additional
+ // description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final"
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly"
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of the
+ // web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "21.0.0"
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
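+//
+// A minimal usage sketch (assuming an existing span from the trace API; the
+// values are illustrative):
+//
+//	span.SetAttributes(
+//		WebEngineName("WildFly"),
+//		WebEngineVersion("21.0.0"),
+//	)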
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
new file mode 100644
index 000000000..2c5c7ebd0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.34.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
new file mode 100644
index 000000000..88a998f1e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
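+	//
+	// A minimal usage sketch (assuming a span plus the trace and attribute
+	// packages are in scope; the attribute key is illustrative):
+	//
+	//	span.AddEvent(ExceptionEventName,
+	//		trace.WithAttributes(attribute.String("exception.message", "boom")),
+	//	)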
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go
new file mode 100644
index 000000000..79843adbb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/httpconv/metric.go
@@ -0,0 +1,1418 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Package httpconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "http" namespace.
+package httpconv
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+)
+
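+// addOptPool and recOptPool recycle the option slices built on each Add and
+// Record call; the slices are reset to zero length and returned to the pool,
+// so hot-path measurements avoid a fresh allocation per call.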
+var (
+ addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+ recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the class of error the operation ended with.
+type ErrorTypeAttr string
+
+var (
+ // ErrorTypeOther is a fallback error value to be used when the instrumentation
+ // doesn't define a custom value.
+ ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ConnectionStateAttr is an attribute conforming to the http.connection.state
+// semantic conventions. It represents the state of the HTTP connection in the
+// HTTP connection pool.
+type ConnectionStateAttr string
+
+var (
+ // ConnectionStateActive is the active state.
+ ConnectionStateActive ConnectionStateAttr = "active"
+ // ConnectionStateIdle is the idle state.
+ ConnectionStateIdle ConnectionStateAttr = "idle"
+)
+
+// RequestMethodAttr is an attribute conforming to the http.request.method
+// semantic conventions. It represents the HTTP request method.
+type RequestMethodAttr string
+
+var (
+ // RequestMethodConnect is the CONNECT method.
+ RequestMethodConnect RequestMethodAttr = "CONNECT"
+ // RequestMethodDelete is the DELETE method.
+ RequestMethodDelete RequestMethodAttr = "DELETE"
+ // RequestMethodGet is the GET method.
+ RequestMethodGet RequestMethodAttr = "GET"
+ // RequestMethodHead is the HEAD method.
+ RequestMethodHead RequestMethodAttr = "HEAD"
+ // RequestMethodOptions is the OPTIONS method.
+ RequestMethodOptions RequestMethodAttr = "OPTIONS"
+ // RequestMethodPatch is the PATCH method.
+ RequestMethodPatch RequestMethodAttr = "PATCH"
+ // RequestMethodPost is the POST method.
+ RequestMethodPost RequestMethodAttr = "POST"
+ // RequestMethodPut is the PUT method.
+ RequestMethodPut RequestMethodAttr = "PUT"
+ // RequestMethodTrace is the TRACE method.
+ RequestMethodTrace RequestMethodAttr = "TRACE"
+	// RequestMethodOther is any HTTP method that the instrumentation has no
+ // prior knowledge of.
+ RequestMethodOther RequestMethodAttr = "_OTHER"
+)
+
+// UserAgentSyntheticTypeAttr is an attribute conforming to the
+// user_agent.synthetic.type semantic conventions. It represents the category of
+// synthetic traffic, such as tests or bots.
+type UserAgentSyntheticTypeAttr string
+
+var (
+ // UserAgentSyntheticTypeBot is the bot source.
+ UserAgentSyntheticTypeBot UserAgentSyntheticTypeAttr = "bot"
+ // UserAgentSyntheticTypeTest is the synthetic test source.
+ UserAgentSyntheticTypeTest UserAgentSyntheticTypeAttr = "test"
+)
+
+// ClientActiveRequests is an instrument used to record metric values conforming
+// to the "http.client.active_requests" semantic conventions. It represents the
+// number of active HTTP requests.
+type ClientActiveRequests struct {
+ metric.Int64UpDownCounter
+}
+
+// NewClientActiveRequests returns a new ClientActiveRequests instrument.
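+//
+// A minimal usage sketch (meter and ctx are assumed to exist; the host and
+// port are illustrative):
+//
+//	reqs, err := NewClientActiveRequests(meter)
+//	if err != nil {
+//		// handle the instrument construction error
+//	}
+//	reqs.Add(ctx, 1, "example.com", 443)        // request started
+//	defer reqs.Add(ctx, -1, "example.com", 443) // request finished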
+func NewClientActiveRequests(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ClientActiveRequests, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.client.active_requests",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP requests."),
+ metric.WithUnit("{request}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
+ }
+ return ClientActiveRequests{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientActiveRequests) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientActiveRequests) Name() string {
+ return "http.client.active_requests"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientActiveRequests) Unit() string {
+ return "{request}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientActiveRequests) Description() string {
+ return "Number of active HTTP requests."
+}
+
+// Add adds incr to the existing count.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the port identifier of the ["URI origin"] the HTTP request
+// is sent to.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
+func (m ClientActiveRequests) Add(
+ ctx context.Context,
+ incr int64,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientActiveRequests) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrRequestMethod returns an optional attribute for the "http.request.method"
+// semantic convention. It represents the HTTP request method.
+func (ClientActiveRequests) AttrRequestMethod(val RequestMethodAttr) attribute.KeyValue {
+ return attribute.String("http.request.method", string(val))
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientActiveRequests) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ClientConnectionDuration is an instrument used to record metric values
+// conforming to the "http.client.connection.duration" semantic conventions. It
+// represents the duration of the successfully established outbound HTTP
+// connections.
+type ClientConnectionDuration struct {
+ metric.Float64Histogram
+}
+
+// NewClientConnectionDuration returns a new ClientConnectionDuration instrument.
+func NewClientConnectionDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ClientConnectionDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientConnectionDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.client.connection.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientConnectionDuration{noop.Float64Histogram{}}, err
+ }
+ return ClientConnectionDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientConnectionDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientConnectionDuration) Name() string {
+ return "http.client.connection.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientConnectionDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientConnectionDuration) Description() string {
+ return "The duration of the successfully established outbound HTTP connections."
+}
+
+// Record records val to the current distribution.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the port identifier of the ["URI origin"] the HTTP request
+// is sent to.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
+func (m ClientConnectionDuration) Record(
+ ctx context.Context,
+ val float64,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientConnectionDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+ return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientConnectionDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientConnectionDuration) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ClientOpenConnections is an instrument used to record metric values conforming
+// to the "http.client.open_connections" semantic conventions. It represents the
+// number of outbound HTTP connections that are currently active or idle on the
+// client.
+type ClientOpenConnections struct {
+ metric.Int64UpDownCounter
+}
+
+// NewClientOpenConnections returns a new ClientOpenConnections instrument.
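+//
+// A minimal usage sketch (meter and ctx are assumed to exist):
+//
+//	conns, err := NewClientOpenConnections(meter)
+//	if err != nil {
+//		// handle the instrument construction error
+//	}
+//	conns.Add(ctx, 1, ConnectionStateActive, "example.com", 443)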
+func NewClientOpenConnections(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ClientOpenConnections, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.client.open_connections",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
+ metric.WithUnit("{connection}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
+ }
+ return ClientOpenConnections{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientOpenConnections) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientOpenConnections) Name() string {
+ return "http.client.open_connections"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientOpenConnections) Unit() string {
+ return "{connection}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientOpenConnections) Description() string {
+ return "Number of outbound HTTP connections that are currently active or idle on the client."
+}
+
+// Add adds incr to the existing count.
+//
+// The connectionState is the state of the HTTP connection in the HTTP connection
+// pool.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the port identifier of the ["URI origin"] the HTTP request
+// is sent to.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
+func (m ClientOpenConnections) Add(
+ ctx context.Context,
+ incr int64,
+ connectionState ConnectionStateAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.connection.state", string(connectionState)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientOpenConnections) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+ return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientOpenConnections) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientOpenConnections) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ClientRequestBodySize is an instrument used to record metric values conforming
+// to the "http.client.request.body.size" semantic conventions. It represents the
+// size of HTTP client request bodies.
+type ClientRequestBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewClientRequestBodySize returns a new ClientRequestBodySize instrument.
+func NewClientRequestBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ClientRequestBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientRequestBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.client.request.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client request bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientRequestBodySize{noop.Int64Histogram{}}, err
+ }
+ return ClientRequestBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientRequestBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestBodySize) Name() string {
+ return "http.client.request.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientRequestBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientRequestBodySize) Description() string {
+ return "Size of HTTP client request bodies."
+}
+
+// Record records val to the current distribution.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the host identifier of the ["URI origin"] the HTTP
+// request is sent to.
+//
+// The serverPort is the port identifier of the ["URI origin"] the HTTP request
+// is sent to.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should
+// be the compressed size.
+//
+// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ClientRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientRequestBodySize) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientRequestBodySize) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ClientRequestDuration is an instrument used to record metric values conforming
+// to the "http.client.request.duration" semantic conventions. It represents the
+// duration of HTTP client requests.
+type ClientRequestDuration struct {
+ metric.Float64Histogram
+}
+
+// NewClientRequestDuration returns a new ClientRequestDuration instrument.
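+//
+// A minimal usage sketch (meter and ctx are assumed to exist; the request
+// itself is elided):
+//
+//	dur, err := NewClientRequestDuration(meter)
+//	if err != nil {
+//		// handle the instrument construction error
+//	}
+//	start := time.Now()
+//	// ... perform the HTTP request ...
+//	dur.Record(ctx, time.Since(start).Seconds(),
+//		RequestMethodGet, "example.com", 443,
+//		dur.AttrResponseStatusCode(200),
+//	)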
+func NewClientRequestDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ClientRequestDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientRequestDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.client.request.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP client requests."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientRequestDuration{noop.Float64Histogram{}}, err
+ }
+ return ClientRequestDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientRequestDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestDuration) Name() string {
+ return "http.client.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientRequestDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientRequestDuration) Description() string {
+ return "Duration of HTTP client requests."
+}
+
+// Record records val to the current distribution.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the host identifier of the ["URI origin"] the HTTP
+// request is sent to.
+//
+// The serverPort is the port identifier of the ["URI origin"] the HTTP request
+// is sent to.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
+func (m ClientRequestDuration) Record(
+ ctx context.Context,
+ val float64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ClientRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientRequestDuration) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientRequestDuration) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// ClientResponseBodySize is an instrument used to record metric values
+// conforming to the "http.client.response.body.size" semantic conventions. It
+// represents the size of HTTP client response bodies.
+type ClientResponseBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewClientResponseBodySize returns a new ClientResponseBodySize instrument.
+func NewClientResponseBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ClientResponseBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientResponseBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.client.response.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client response bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientResponseBodySize{noop.Int64Histogram{}}, err
+ }
+ return ClientResponseBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientResponseBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientResponseBodySize) Name() string {
+ return "http.client.response.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientResponseBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientResponseBodySize) Description() string {
+ return "Size of HTTP client response bodies."
+}
+
+// Record records val to the current distribution.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the host identifier of the ["URI origin"] the HTTP
+// request is sent to.
+//
+// The serverPort is the port identifier of the ["URI origin"] the HTTP request
+// is sent to.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should
+// be the compressed size.
+//
+// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ClientResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientResponseBodySize) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientResponseBodySize) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ServerActiveRequests is an instrument used to record metric values conforming
+// to the "http.server.active_requests" semantic conventions. It represents the
+// number of active HTTP server requests.
+type ServerActiveRequests struct {
+ metric.Int64UpDownCounter
+}
+
+// NewServerActiveRequests returns a new ServerActiveRequests instrument.
+func NewServerActiveRequests(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ServerActiveRequests, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.server.active_requests",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP server requests."),
+ metric.WithUnit("{request}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
+ }
+ return ServerActiveRequests{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerActiveRequests) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerActiveRequests) Name() string {
+ return "http.server.active_requests"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerActiveRequests) Unit() string {
+ return "{request}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerActiveRequests) Description() string {
+ return "Number of active HTTP server requests."
+}
+
+// Add adds incr to the existing count.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (m ServerActiveRequests) Add(
+ ctx context.Context,
+ incr int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerActiveRequests) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerActiveRequests) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// ServerRequestBodySize is an instrument used to record metric values conforming
+// to the "http.server.request.body.size" semantic conventions. It represents the
+// size of HTTP server request bodies.
+type ServerRequestBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewServerRequestBodySize returns a new ServerRequestBodySize instrument.
+func NewServerRequestBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ServerRequestBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerRequestBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.server.request.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server request bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerRequestBodySize{noop.Int64Histogram{}}, err
+ }
+ return ServerRequestBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerRequestBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerRequestBodySize) Name() string {
+ return "http.server.request.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerRequestBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerRequestBodySize) Description() string {
+ return "Size of HTTP server request bodies."
+}
+
+// Record records val to the current distribution.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should
+// be the compressed size.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ServerRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerRequestBodySize) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerRequestBodySize) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerRequestBodySize) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerRequestBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
+
+// ServerRequestDuration is an instrument used to record metric values conforming
+// to the "http.server.request.duration" semantic conventions. It represents the
+// duration of HTTP server requests.
+type ServerRequestDuration struct {
+ metric.Float64Histogram
+}
+
+// NewServerRequestDuration returns a new ServerRequestDuration instrument.
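+//
+// A minimal usage sketch for the server side (meter, ctx, and elapsed, a
+// measured time.Duration, are assumed to exist):
+//
+//	dur, err := NewServerRequestDuration(meter)
+//	if err != nil {
+//		// handle the instrument construction error
+//	}
+//	dur.Record(ctx, elapsed.Seconds(), RequestMethodGet, "https",
+//		dur.AttrRoute("/users/{id}"),
+//		dur.AttrResponseStatusCode(200),
+//	)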
+func NewServerRequestDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ServerRequestDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerRequestDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.server.request.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP server requests."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerRequestDuration{noop.Float64Histogram{}}, err
+ }
+ return ServerRequestDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerRequestDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerRequestDuration) Name() string {
+ return "http.server.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerRequestDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerRequestDuration) Description() string {
+ return "Duration of HTTP server requests."
+}
+
+// Record records val to the current distribution.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (m ServerRequestDuration) Record(
+ ctx context.Context,
+ val float64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerRequestDuration) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerRequestDuration) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
+
+// ServerResponseBodySize is an instrument used to record metric values
+// conforming to the "http.server.response.body.size" semantic conventions. It
+// represents the size of HTTP server response bodies.
+type ServerResponseBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewServerResponseBodySize returns a new ServerResponseBodySize instrument.
+func NewServerResponseBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ServerResponseBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerResponseBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.server.response.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server response bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerResponseBodySize{noop.Int64Histogram{}}, err
+ }
+ return ServerResponseBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerResponseBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerResponseBodySize) Name() string {
+ return "http.server.response.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerResponseBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerResponseBodySize) Description() string {
+ return "Size of HTTP server response bodies."
+}
+
+// Record records val to the current distribution.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should
+// be the compressed size.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerResponseBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ServerResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerResponseBodySize) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerResponseBodySize) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
new file mode 100644
index 000000000..3c23d4592
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
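+//
+// A minimal usage sketch (assuming the otel and trace packages are imported;
+// the tracer name is illustrative):
+//
+//	tracer := otel.Tracer("example.com/foo",
+//		trace.WithSchemaURL(semconv.SchemaURL),
+//	)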
+const SchemaURL = "https://opentelemetry.io/schemas/1.34.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
new file mode 100644
index 000000000..6836c6547
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Tracer creates a named tracer that implements the Tracer interface.
+// If the name is an empty string, the provider uses a default name.
+//
+// This is short for GetTracerProvider().Tracer(name, opts...)
+func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ return GetTracerProvider().Tracer(name, opts...)
+}
+
+// GetTracerProvider returns the registered global trace provider.
+// If none is registered then an instance of NoopTracerProvider is returned.
+//
+// Use the trace provider to create a named tracer. E.g.
+//
+// tracer := otel.GetTracerProvider().Tracer("example.com/foo")
+//
+// or
+//
+// tracer := otel.Tracer("example.com/foo")
+func GetTracerProvider() trace.TracerProvider {
+ return global.TracerProvider()
+}
+
+// SetTracerProvider registers `tp` as the global trace provider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ global.SetTracerProvider(tp)
+}
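+
+// A minimal usage sketch of the helpers above (illustrative only; sdktrace is
+// an assumed alias for an SDK TracerProvider implementation and is not part
+// of this package):
+//
+//	tp := sdktrace.NewTracerProvider()       // assumed SDK constructor
+//	otel.SetTracerProvider(tp)               // register the global provider
+//	tracer := otel.Tracer("example.com/foo") // GetTracerProvider().Tracer(...)
+//	_ = tracer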
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/procfs/LICENSE
rename to vendor/go.opentelemetry.io/otel/trace/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md
new file mode 100644
index 000000000..58ccaba69
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/README.md
@@ -0,0 +1,3 @@
+# Trace API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace)
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
new file mode 100644
index 000000000..f3aa39813
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -0,0 +1,662 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/internal/telemetry"
+)
+
+// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider].
+// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
+func newAutoTracerProvider() TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(autoTracerProvider)
+
+type autoTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = autoTracerProvider{}
+
+func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+ cfg := NewTracerConfig(opts...)
+ return autoTracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
+
+type autoTracer struct {
+ embedded.Tracer
+
+ name, schemaURL, version string
+}
+
+var _ Tracer = autoTracer{}
+
+func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) {
+ var psc, sc SpanContext
+ sampled := true
+ span := new(autoSpan)
+
+ // Ask eBPF for sampling decision and span context info.
+ t.start(ctx, span, &psc, &sampled, &sc)
+
+ span.sampled.Store(sampled)
+ span.spanContext = sc
+
+ ctx = ContextWithSpan(ctx, span)
+
+ if sampled {
+ // Only build traces if sampled.
+ cfg := NewSpanStartConfig(opts...)
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+ }
+
+ return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *autoTracer) start(
+ ctx context.Context,
+ spanPtr *autoSpan,
+ psc *SpanContext,
+ sampled *bool,
+ sc *SpanContext,
+) {
+ start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
+var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {}
+
+func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) {
+ span := &telemetry.Span{
+ TraceID: telemetry.TraceID(sc.TraceID()),
+ SpanID: telemetry.SpanID(sc.SpanID()),
+ Flags: uint32(sc.TraceFlags()),
+ TraceState: sc.TraceState().String(),
+ ParentSpanID: telemetry.SpanID(psc.SpanID()),
+ Name: name,
+ Kind: spanKind(cfg.SpanKind()),
+ }
+
+ span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+ links := cfg.Links()
+ if limit := maxSpan.Links; limit == 0 {
+ n := int64(len(links))
+ if n > 0 {
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ } else {
+ if limit > 0 {
+ n := int64(max(len(links)-limit, 0))
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ links = links[n:]
+ }
+ span.Links = convLinks(links)
+ }
+
+ if t := cfg.Timestamp(); !t.IsZero() {
+ span.StartTime = cfg.Timestamp()
+ } else {
+ span.StartTime = time.Now()
+ }
+
+ return &telemetry.Traces{
+ ResourceSpans: []*telemetry.ResourceSpans{
+ {
+ ScopeSpans: []*telemetry.ScopeSpans{
+ {
+ Scope: &telemetry.Scope{
+ Name: t.name,
+ Version: t.version,
+ },
+ Spans: []*telemetry.Span{span},
+ SchemaURL: t.schemaURL,
+ },
+ },
+ },
+ },
+ }, span
+}
+
+func spanKind(kind SpanKind) telemetry.SpanKind {
+ switch kind {
+ case SpanKindInternal:
+ return telemetry.SpanKindInternal
+ case SpanKindServer:
+ return telemetry.SpanKindServer
+ case SpanKindClient:
+ return telemetry.SpanKindClient
+ case SpanKindProducer:
+ return telemetry.SpanKindProducer
+ case SpanKindConsumer:
+ return telemetry.SpanKindConsumer
+ }
+ return telemetry.SpanKind(0) // undefined.
+}
+
+type autoSpan struct {
+ embedded.Span
+
+ spanContext SpanContext
+ sampled atomic.Bool
+
+ mu sync.Mutex
+ traces *telemetry.Traces
+ span *telemetry.Span
+}
+
+func (s *autoSpan) SpanContext() SpanContext {
+ if s == nil {
+ return SpanContext{}
+ }
+ // s.spanContext is immutable, do not acquire lock s.mu.
+ return s.spanContext
+}
+
+func (s *autoSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+
+ return s.sampled.Load()
+}
+
+func (s *autoSpan) SetStatus(c codes.Code, msg string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.span.Status == nil {
+ s.span.Status = new(telemetry.Status)
+ }
+
+ s.span.Status.Message = msg
+
+ switch c {
+ case codes.Unset:
+ s.span.Status.Code = telemetry.StatusCodeUnset
+ case codes.Error:
+ s.span.Status.Code = telemetry.StatusCodeError
+ case codes.Ok:
+ s.span.Status.Code = telemetry.StatusCodeOK
+ }
+}
+
+func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := maxSpan.Attrs
+ if limit == 0 {
+ // No attributes allowed.
+ n := int64(len(attrs))
+ if n > 0 {
+ s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return
+ }
+
+ m := make(map[string]int)
+ for i, a := range s.span.Attrs {
+ m[a.Key] = i
+ }
+
+ for _, a := range attrs {
+ val := convAttrValue(a.Value)
+ if val.Empty() {
+ s.span.DroppedAttrs++
+ continue
+ }
+
+ if idx, ok := m[string(a.Key)]; ok {
+ s.span.Attrs[idx] = telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ }
+ } else if limit < 0 || len(s.span.Attrs) < limit {
+ s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ })
+ m[string(a.Key)] = len(s.span.Attrs) - 1
+ } else {
+ s.span.DroppedAttrs++
+ }
+ }
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ n := len(attrs)
+ if limit == 0 {
+ var out uint32
+ if n > 0 {
+ out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return nil, out
+ }
+
+ if limit < 0 {
+ // Unlimited.
+ return convAttrs(attrs), 0
+ }
+
+ if n < 0 {
+ n = 0
+ }
+
+ limit = min(n, limit)
+ return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked.
+}
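+
+// Illustrative behavior of the three limit regimes handled above:
+//
+//	attrs := []attribute.KeyValue{attribute.String("a", "1"), attribute.String("b", "2")}
+//	convCappedAttrs(0, attrs)  // nil, 2 (all dropped)
+//	convCappedAttrs(-1, attrs) // both attrs, 0 dropped (unlimited)
+//	convCappedAttrs(1, attrs)  // first attr only, 1 dropped (capped)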
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+ if len(attrs) == 0 {
+ // Avoid allocations if not necessary.
+ return nil
+ }
+
+ out := make([]telemetry.Attr, 0, len(attrs))
+ for _, attr := range attrs {
+ key := string(attr.Key)
+ val := convAttrValue(attr.Value)
+ if val.Empty() {
+ continue
+ }
+ out = append(out, telemetry.Attr{Key: key, Value: val})
+ }
+ return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+ switch value.Type() {
+ case attribute.BOOL:
+ return telemetry.BoolValue(value.AsBool())
+ case attribute.INT64:
+ return telemetry.Int64Value(value.AsInt64())
+ case attribute.FLOAT64:
+ return telemetry.Float64Value(value.AsFloat64())
+ case attribute.STRING:
+ v := truncate(maxSpan.AttrValueLen, value.AsString())
+ return telemetry.StringValue(v)
+ case attribute.BOOLSLICE:
+ slice := value.AsBoolSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.BoolValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.INT64SLICE:
+ slice := value.AsInt64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Int64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.FLOAT64SLICE:
+ slice := value.AsFloat64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Float64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.STRINGSLICE:
+ slice := value.AsStringSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ v = truncate(maxSpan.AttrValueLen, v)
+ out = append(out, telemetry.StringValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ }
+ return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s containing at most limit
+// characters. Truncation is applied by returning the first limit valid
+// characters of s.
+//
+// If limit is negative, the original string is returned.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than limit bytes, it is returned unchanged.
+// No invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritize performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
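+
+// Illustrative results (values follow from the code above):
+//
+//	truncate(-1, "résumé")  // "résumé" (negative limit: unchanged)
+//	truncate(3, "résumé")   // "rés"    (first 3 valid runes kept)
+//	truncate(3, "ab\xffcd") // "abc"    (invalid byte dropped, then capped)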
+
+func (s *autoSpan) End(opts ...SpanEndOption) {
+ if s == nil || !s.sampled.Swap(false) {
+ return
+ }
+
+ // s.end exists so the lock (s.mu) is not held while s.ended is called.
+ s.ended(s.end(opts))
+}
+
+func (s *autoSpan) end(opts []SpanEndOption) []byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ cfg := NewSpanEndConfig(opts...)
+ if t := cfg.Timestamp(); !t.IsZero() {
+ s.span.EndTime = cfg.Timestamp()
+ } else {
+ s.span.EndTime = time.Now()
+ }
+
+ b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+ return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*autoSpan) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *autoSpan) RecordError(err error, opts ...EventOption) {
+ if s == nil || err == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ attrs := cfg.Attributes()
+ attrs = append(attrs,
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ )
+ if cfg.StackTrace() {
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *autoSpan) AddEvent(name string, opts ...EventOption) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+ limit := maxSpan.Events
+
+ if limit == 0 {
+ s.span.DroppedEvents++
+ return
+ }
+
+ if limit > 0 && len(s.span.Events) == limit {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:])
+ s.span.Events = s.span.Events[:limit-1]
+ s.span.DroppedEvents++
+ }
+
+ e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+ e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+ s.span.Events = append(s.span.Events, e)
+}
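+
+// Note the drop policy above is head-first: with maxSpan.Events == 2 and
+// events e1, e2 already recorded, adding e3 leaves [e2, e3] and increments
+// DroppedEvents to 1.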
+
+func (s *autoSpan) AddLink(link Link) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ l := maxSpan.Links
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if l == 0 {
+ s.span.DroppedLinks++
+ return
+ }
+
+ if l > 0 && len(s.span.Links) == l {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Links[:l-1], s.span.Links[1:])
+ s.span.Links = s.span.Links[:l-1]
+ s.span.DroppedLinks++
+ }
+
+ s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []Link) []*telemetry.SpanLink {
+ out := make([]*telemetry.SpanLink, 0, len(links))
+ for _, link := range links {
+ out = append(out, convLink(link))
+ }
+ return out
+}
+
+func convLink(link Link) *telemetry.SpanLink {
+ l := &telemetry.SpanLink{
+ TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
+ SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
+ TraceState: link.SpanContext.TraceState().String(),
+ Flags: uint32(link.SpanContext.TraceFlags()),
+ }
+ l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+ return l
+}
+
+func (s *autoSpan) SetName(name string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.span.Name = name
+}
+
+func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() }
+
+// maxSpan holds the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+ // Attrs is the number of allowed attributes for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+ // that is not set, is used.
+ Attrs int
+ // AttrValueLen is the maximum attribute value length allowed for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+ // if that is not set, is used.
+ AttrValueLen int
+ // Events is the number of allowed events for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+ Events int
+ // EventAttrs is the number of allowed attributes for a span event.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+ EventAttrs int
+ // Links is the number of allowed Links for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+ Links int
+ // LinkAttrs is the number of allowed attributes for a span link.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+ LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+ return spanLimits{
+ Attrs: firstEnv(
+ 128,
+ "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+ "OTEL_ATTRIBUTE_COUNT_LIMIT",
+ ),
+ AttrValueLen: firstEnv(
+ -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ ),
+ Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+ EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+ Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+ LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+ }
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+ for _, key := range keys {
+ strV := os.Getenv(key)
+ if strV == "" {
+ continue
+ }
+
+ v, err := strconv.Atoi(strV)
+ if err == nil {
+ return v
+ }
+ // Ignore invalid environment variable.
+ }
+
+ return defaultVal
+}
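+
+// For example, with OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT unset and
+// OTEL_ATTRIBUTE_COUNT_LIMIT=64 in the environment, the Attrs limit resolves
+// to 64; if neither variable is set, or the set value is not an integer, the
+// default 128 is used.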
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
new file mode 100644
index 000000000..9c0b720a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -0,0 +1,323 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// TracerConfig is a group of options for a Tracer.
+type TracerConfig struct {
+ instrumentationVersion string
+ // Schema URL of the telemetry emitted by the Tracer.
+ schemaURL string
+ attrs attribute.Set
+}
+
+// InstrumentationVersion returns the version of the library providing instrumentation.
+func (t *TracerConfig) InstrumentationVersion() string {
+ return t.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
+ return t.attrs
+}
+
+// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
+func (t *TracerConfig) SchemaURL() string {
+ return t.schemaURL
+}
+
+// NewTracerConfig applies all the options to a returned TracerConfig.
+func NewTracerConfig(options ...TracerOption) TracerConfig {
+ var config TracerConfig
+ for _, option := range options {
+ config = option.apply(config)
+ }
+ return config
+}
+
+// TracerOption applies an option to a TracerConfig.
+type TracerOption interface {
+ apply(TracerConfig) TracerConfig
+}
+
+type tracerOptionFunc func(TracerConfig) TracerConfig
+
+func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig {
+ return fn(cfg)
+}
+
+// SpanConfig is a group of options for a Span.
+type SpanConfig struct {
+ attributes []attribute.KeyValue
+ timestamp time.Time
+ links []Link
+ newRoot bool
+ spanKind SpanKind
+ stackTrace bool
+}
+
+// Attributes describe the associated qualities of a Span.
+func (cfg *SpanConfig) Attributes() []attribute.KeyValue {
+ return cfg.attributes
+}
+
+// Timestamp is a time in a Span life-cycle.
+func (cfg *SpanConfig) Timestamp() time.Time {
+ return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *SpanConfig) StackTrace() bool {
+ return cfg.stackTrace
+}
+
+// Links are the associations a Span has with other Spans.
+func (cfg *SpanConfig) Links() []Link {
+ return cfg.links
+}
+
+// NewRoot identifies a Span as the root Span for a new trace. This is
+// commonly used when an existing trace crosses trust boundaries and the
+// remote parent span context should be ignored for security.
+func (cfg *SpanConfig) NewRoot() bool {
+ return cfg.newRoot
+}
+
+// SpanKind is the role a Span has in a trace.
+func (cfg *SpanConfig) SpanKind() SpanKind {
+ return cfg.spanKind
+}
+
+// NewSpanStartConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data), it is left to the SDK to perform this
+// action.
+func NewSpanStartConfig(options ...SpanStartOption) SpanConfig {
+ var c SpanConfig
+ for _, option := range options {
+ c = option.applySpanStart(c)
+ }
+ return c
+}
+
+// NewSpanEndConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data), it is left to the SDK to perform this
+// action.
+func NewSpanEndConfig(options ...SpanEndOption) SpanConfig {
+ var c SpanConfig
+ for _, option := range options {
+ c = option.applySpanEnd(c)
+ }
+ return c
+}
+
+// SpanStartOption applies an option to a SpanConfig. These options are applicable
+// only when the span is created.
+type SpanStartOption interface {
+ applySpanStart(SpanConfig) SpanConfig
+}
+
+type spanOptionFunc func(SpanConfig) SpanConfig
+
+func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig {
+ return fn(cfg)
+}
+
+// SpanEndOption applies an option to a SpanConfig. These options are
+// applicable only when the span is ended.
+type SpanEndOption interface {
+ applySpanEnd(SpanConfig) SpanConfig
+}
+
+// EventConfig is a group of options for an Event.
+type EventConfig struct {
+ attributes []attribute.KeyValue
+ timestamp time.Time
+ stackTrace bool
+}
+
+// Attributes describe the associated qualities of an Event.
+func (cfg *EventConfig) Attributes() []attribute.KeyValue {
+ return cfg.attributes
+}
+
+// Timestamp is a time in an Event life-cycle.
+func (cfg *EventConfig) Timestamp() time.Time {
+ return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *EventConfig) StackTrace() bool {
+ return cfg.stackTrace
+}
+
+// NewEventConfig applies all the EventOptions to a returned EventConfig. If no
+// timestamp option is passed, the returned EventConfig will have a Timestamp
+// set to the call time. No other validation is performed on the returned
+// EventConfig.
+func NewEventConfig(options ...EventOption) EventConfig {
+ var c EventConfig
+ for _, option := range options {
+ c = option.applyEvent(c)
+ }
+ if c.timestamp.IsZero() {
+ c.timestamp = time.Now()
+ }
+ return c
+}
+
+// EventOption applies span event options to an EventConfig.
+type EventOption interface {
+ applyEvent(EventConfig) EventConfig
+}
+
+// SpanOption is an option that can be used at both the beginning and end of a span.
+type SpanOption interface {
+ SpanStartOption
+ SpanEndOption
+}
+
+// SpanStartEventOption is an option that can be used at the start of a span, or with an event.
+type SpanStartEventOption interface {
+ SpanStartOption
+ EventOption
+}
+
+// SpanEndEventOption is an option that can be used at the end of a span, or with an event.
+type SpanEndEventOption interface {
+ SpanEndOption
+ EventOption
+}
+
+type attributeOption []attribute.KeyValue
+
+func (o attributeOption) applySpan(c SpanConfig) SpanConfig {
+ c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+ return c
+}
+func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o attributeOption) applyEvent(c EventConfig) EventConfig {
+ c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+ return c
+}
+
+var _ SpanStartEventOption = attributeOption{}
+
+// WithAttributes adds the attributes related to a span life-cycle event.
+// These attributes are used to describe the work a Span represents when this
+// option is provided to a Span's start event. Otherwise, these
+// attributes provide additional information about the event being recorded
+// (e.g. error, state change, processing progress, system event).
+//
+// If multiple of these options are passed the attributes of each successive
+// option will extend the attributes instead of overwriting. There is no
+// guarantee of uniqueness in the resulting attributes.
+func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption {
+ return attributeOption(attributes)
+}
+
+// SpanEventOption is an option that can be used with an event or a span.
+type SpanEventOption interface {
+ SpanOption
+ EventOption
+}
+
+type timestampOption time.Time
+
+func (o timestampOption) applySpan(c SpanConfig) SpanConfig {
+ c.timestamp = time.Time(o)
+ return c
+}
+func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o timestampOption) applyEvent(c EventConfig) EventConfig {
+ c.timestamp = time.Time(o)
+ return c
+}
+
+var _ SpanEventOption = timestampOption{}
+
+// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g.
+// started, stopped, errored).
+func WithTimestamp(t time.Time) SpanEventOption {
+ return timestampOption(t)
+}
+
+type stackTraceOption bool
+
+func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
+ c.stackTrace = bool(o)
+ return c
+}
+
+func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
+ c.stackTrace = bool(o)
+ return c
+}
+func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
+
+// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false).
+func WithStackTrace(b bool) SpanEndEventOption {
+ return stackTraceOption(b)
+}
+
+// WithLinks adds links to a Span. The links are added to the existing Span
+// links, i.e. this does not overwrite. Links with invalid span context are ignored.
+func WithLinks(links ...Link) SpanStartOption {
+ return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+ cfg.links = append(cfg.links, links...)
+ return cfg
+ })
+}
+
+// WithNewRoot specifies that the Span should be treated as a root Span. Any
+// existing parent span context will be ignored when defining the Span's trace
+// identifiers.
+func WithNewRoot() SpanStartOption {
+ return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+ cfg.newRoot = true
+ return cfg
+ })
+}
+
+// WithSpanKind sets the SpanKind of a Span.
+func WithSpanKind(kind SpanKind) SpanStartOption {
+ return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+ cfg.spanKind = kind
+ return cfg
+ })
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) TracerOption {
+ return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+ cfg.instrumentationVersion = version
+ return cfg
+ })
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+ return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
+// WithSchemaURL sets the schema URL for the Tracer.
+func WithSchemaURL(schemaURL string) TracerOption {
+ return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+ cfg.schemaURL = schemaURL
+ return cfg
+ })
+}
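+
+// A minimal sketch combining the tracer options above (names from this file
+// only; values are illustrative):
+//
+//	cfg := NewTracerConfig(
+//		WithInstrumentationVersion("1.2.3"),
+//		WithSchemaURL("https://opentelemetry.io/schemas/1.34.0"),
+//	)
+//	_ = cfg.InstrumentationVersion() // "1.2.3"
+//	_ = cfg.SchemaURL()              // "https://opentelemetry.io/schemas/1.34.0"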
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
new file mode 100644
index 000000000..8c45a7107
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "context"
+
+type traceContextKeyType int
+
+const currentSpanKey traceContextKeyType = iota
+
+// ContextWithSpan returns a copy of parent with span set as the current Span.
+func ContextWithSpan(parent context.Context, span Span) context.Context {
+ return context.WithValue(parent, currentSpanKey, span)
+}
+
+// ContextWithSpanContext returns a copy of parent with sc as the current
+// Span. The Span implementation that wraps sc is non-recording and performs
+// no operations other than to return sc as the SpanContext from the
+// SpanContext method.
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
+ return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
+}
+
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
+// as a remote SpanContext and as the current Span. The Span implementation
+// that wraps rsc is non-recording and performs no operations other than to
+// return rsc as the SpanContext from the SpanContext method.
+func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
+ return ContextWithSpanContext(parent, rsc.WithRemote(true))
+}
+
+// SpanFromContext returns the current Span from ctx.
+//
+// If no Span is currently set in ctx an implementation of a Span that
+// performs no operations is returned.
+func SpanFromContext(ctx context.Context) Span {
+ if ctx == nil {
+ return noopSpanInstance
+ }
+ if span, ok := ctx.Value(currentSpanKey).(Span); ok {
+ return span
+ }
+ return noopSpanInstance
+}
+
+// SpanContextFromContext returns the current Span's SpanContext.
+func SpanContextFromContext(ctx context.Context) SpanContext {
+ return SpanFromContext(ctx).SpanContext()
+}
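+
+// A small round-trip sketch of the helpers above (sc is an assumed, valid
+// SpanContext):
+//
+//	ctx := ContextWithSpanContext(context.Background(), sc)
+//	got := SpanContextFromContext(ctx)  // got.Equal(sc) == true
+//	_ = SpanFromContext(context.TODO()) // no span set: returns a no-op Span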
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
new file mode 100644
index 000000000..cdbf41d6d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -0,0 +1,119 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package trace provides an implementation of the tracing part of the
+OpenTelemetry API.
+
+To participate in distributed traces a Span needs to be created for the
+operation being performed as part of a traced workflow. In its simplest form:
+
+ var tracer trace.Tracer
+
+ func init() {
+ tracer = otel.Tracer("instrumentation/package/name")
+ }
+
+ func operation(ctx context.Context) {
+ var span trace.Span
+ ctx, span = tracer.Start(ctx, "operation")
+ defer span.End()
+ // ...
+ }
+
+A Tracer is unique to the instrumentation and is used to create Spans.
+Instrumentation should be designed to accept a TracerProvider from which it
+can create its own unique Tracer. Alternatively, the registered global
+TracerProvider from the go.opentelemetry.io/otel package can be used as
+a default.
+
+ const (
+ name = "instrumentation/package/name"
+ version = "0.1.0"
+ )
+
+ type Instrumentation struct {
+ tracer trace.Tracer
+ }
+
+ func NewInstrumentation(tp trace.TracerProvider) *Instrumentation {
+ if tp == nil {
+ tp = otel.GetTracerProvider()
+ }
+ return &Instrumentation{
+ tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)),
+ }
+ }
+
+ func operation(ctx context.Context, inst *Instrumentation) {
+ var span trace.Span
+ ctx, span = inst.tracer.Start(ctx, "operation")
+ defer span.End()
+ // ...
+ }
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+ - Compilation failure
+ - Panic
+ - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
+example,
+
+ import "go.opentelemetry.io/otel/trace/embedded"
+
+ type TracerProvider struct {
+ embedded.TracerProvider
+ // ...
+ }
+
+If an author wants the default behavior of their implementations to panic, they
+can embed the API interface directly.
+
+ import "go.opentelemetry.io/otel/trace"
+
+ type TracerProvider struct {
+ trace.TracerProvider
+ // ...
+ }
+
+This option is not recommended. It will lead to publishing packages that
+contain runtime panics when users update to newer versions of
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
+dependency.
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/trace/noop]:
+
+ import "go.opentelemetry.io/otel/trace/noop"
+
+ type TracerProvider struct {
+ noop.TracerProvider
+ // ...
+ }
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+*/
+package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
new file mode 100644
index 000000000..7754a239e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
@@ -0,0 +1,3 @@
+# Trace Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
new file mode 100644
index 000000000..3e359a00b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -0,0 +1,45 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// trace API].
+//
+// Implementers of the [OpenTelemetry trace API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry trace API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
+package embedded // import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider is embedded in
+// [go.opentelemetry.io/otel/trace.TracerProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type TracerProvider interface{ tracerProvider() }
+
+// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Tracer interface{ tracer() }
+
+// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Span interface{ span() }
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
new file mode 100644
index 000000000..f663547b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// Attr is a key-value pair.
+type Attr struct {
+ Key string `json:"key,omitempty"`
+ Value Value `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+ return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+ return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+ return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+ return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+ return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
new file mode 100644
index 000000000..5debe90bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
new file mode 100644
index 000000000..7b1ae3c4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+const (
+ traceIDSize = 16
+ spanIDSize = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+ return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+ if tid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from a hex string, possibly enclosed
+// in quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+ *tid = [traceIDSize]byte{}
+ return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+ return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts the span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+ if sid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes the span ID from a hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+ *sid = [spanIDSize]byte{}
+ return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+ // Plus 2 quote chars at the start and end.
+ hexLen := hex.EncodedLen(len(id)) + 2
+
+ b := make([]byte, hexLen)
+ hex.Encode(b[1:hexLen-1], id)
+ b[0], b[hexLen-1] = '"', '"'
+
+ return b, nil
+}
+
+// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+ if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+ src = src[1 : l-1]
+ }
+ nLen := len(src)
+ if nLen == 0 {
+ return nil
+ }
+
+ if len(dst) != hex.DecodedLen(nLen) {
+ return errors.New("invalid length for ID")
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+ }
+ return nil
+}
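+
+// Round-trip sketch for the ID JSON helpers above:
+//
+//	tid := TraceID{0x01, 0x02} // remaining 14 bytes zero
+//	b, _ := tid.MarshalJSON()  // `"01020000000000000000000000000000"`
+//	var back TraceID
+//	_ = back.UnmarshalJSON(b)  // back == tid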
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
new file mode 100644
index 000000000..f5e3a8cec
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedInt, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ } else {
+ var parsedInt int64
+ if err := json.Unmarshal(data, &parsedInt); err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ }
+ return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedUint, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ } else {
+ var parsedUint uint64
+ if err := json.Unmarshal(data, &parsedUint); err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ }
+ return nil
+}
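+
+// Both JSON encodings of a 64-bit integer decode identically, e.g.:
+//
+//	var a, b protoInt64
+//	_ = json.Unmarshal([]byte(`42`), &a)   // a == 42
+//	_ = json.Unmarshal([]byte(`"42"`), &b) // b == 42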
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
new file mode 100644
index 000000000..1798a702d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Resource information.
+type Resource struct {
+ // Attrs are the set of attributes that describe the resource. Attribute
+ // keys MUST be unique (it is not allowed to have more than one attribute
+ // with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // DroppedAttrs is the number of dropped attributes. If the value
+ // is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Resource type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Resource field: %#v", keyIface)
+ }
+
+ switch key {
+ case "attributes":
+ err = decoder.Decode(&r.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&r.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
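+
+// Both OTLP/JSON key spellings decode to the same field, e.g.:
+//
+//	var r Resource
+//	_ = r.UnmarshalJSON([]byte(`{"droppedAttributesCount": 2}`))   // r.DroppedAttrs == 2
+//	_ = r.UnmarshalJSON([]byte(`{"dropped_attributes_count": 2}`)) // same result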
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
new file mode 100644
index 000000000..c2b4c635b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+ Name string `json:"name,omitempty"`
+ Version string `json:"version,omitempty"`
+ Attrs []Attr `json:"attributes,omitempty"`
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Scope type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Scope field: %#v", keyIface)
+ }
+
+ switch key {
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "version":
+ err = decoder.Decode(&s.Version)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
new file mode 100644
index 000000000..e7ca62c66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
@@ -0,0 +1,472 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `json:"traceState,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `json:"name"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind SpanKind `json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // events is a collection of Event items.
+ Events []*SpanEvent `json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*SpanLink `json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+ startT := s.StartTime.UnixNano()
+ if s.StartTime.IsZero() || startT < 0 {
+ startT = 0
+ }
+
+ endT := s.EndTime.UnixNano()
+ if s.EndTime.IsZero() || endT < 0 {
+ endT = 0
+ }
+
+ // Override non-empty default SpanID marshal and omitempty.
+ var parentSpanId string
+ if !s.ParentSpanID.IsEmpty() {
+ b := make([]byte, hex.EncodedLen(spanIDSize))
+ hex.Encode(b, s.ParentSpanID[:])
+ parentSpanId = string(b)
+ }
+
+ type Alias Span
+ return json.Marshal(struct {
+ Alias
+ ParentSpanID string `json:"parentSpanId,omitempty"`
+ StartTime uint64 `json:"startTimeUnixNano,omitempty"`
+ EndTime uint64 `json:"endTimeUnixNano,omitempty"`
+ }{
+ Alias: Alias(s),
+ ParentSpanID: parentSpanId,
+ StartTime: uint64(startT), // nolint:gosec // >0 checked above.
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above.
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Span type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Span field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&s.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&s.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&s.TraceState)
+ case "parentSpanId", "parent_span_id":
+ err = decoder.Decode(&s.ParentSpanID)
+ case "flags":
+ err = decoder.Decode(&s.Flags)
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "kind":
+ err = decoder.Decode(&s.Kind)
+ case "startTimeUnixNano", "start_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.StartTime = time.Unix(0, v)
+ case "endTimeUnixNano", "end_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.EndTime = time.Unix(0, v)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ case "events":
+ err = decoder.Decode(&s.Events)
+ case "droppedEventsCount", "dropped_events_count":
+ err = decoder.Decode(&s.DroppedEvents)
+ case "links":
+ err = decoder.Decode(&s.Links)
+ case "droppedLinksCount", "dropped_links_count":
+ err = decoder.Decode(&s.DroppedLinks)
+ case "status":
+ err = decoder.Decode(&s.Status)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
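The UnmarshalJSON methods in this file all share one streaming pattern: consume the opening `{` token, then iterate keys with `decoder.More()`, matching both the camelCase and snake_case spellings that OTLP/JSON permits. A minimal, self-contained sketch of that pattern (the `spanName`/`span_name` field is hypothetical, for illustration only):

```go
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
)

// decodeName mirrors the token-based loop used above: read '{',
// then dispatch on each key, accepting both JSON name spellings.
func decodeName(data []byte) (string, error) {
	dec := json.NewDecoder(bytes.NewReader(data))
	if t, err := dec.Token(); err != nil {
		return "", err
	} else if t != json.Delim('{') {
		return "", errors.New("expected a JSON object")
	}
	var name string
	for dec.More() {
		key, err := dec.Token()
		if err != nil {
			return "", err
		}
		switch key {
		case "spanName", "span_name":
			err = dec.Decode(&name)
		default:
			var skip json.RawMessage // consume and discard unknown values
			err = dec.Decode(&skip)
		}
		if err != nil {
			return "", err
		}
	}
	return name, nil
}

func main() {
	name, err := decodeName([]byte(`{"span_name":"op","extra":1}`))
	fmt.Println(name, err) // op <nil>
}
```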
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is a protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // SpanFlagsTraceFlagsMask is a mask for trace-flags.
+ //
+ // Bits 0-7 are used for trace flags.
+ SpanFlagsTraceFlagsMask SpanFlags = 255
+ // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+ // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
+ // remote.
+ SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
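A small sketch of how these masks are meant to be applied to a raw flags value; the sample value is made up for illustration:

```go
package main

import "fmt"

func main() {
	const (
		traceFlagsMask  = 0x00FF // bits 0-7: W3C trace flags
		hasIsRemoteMask = 0x0100 // bit 8: whether is-remote is known
		isRemoteMask    = 0x0200 // bit 9: whether the context is remote
	)

	flags := uint32(0x0301) // sampled bit plus both remote bits set
	fmt.Printf("trace flags: %#02x\n", flags&traceFlagsMask) // 0x1
	fmt.Println("remote known:", flags&hasIsRemoteMask != 0) // true
	fmt.Println("is remote:", flags&isRemoteMask != 0)       // true
}
```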
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // SpanKindInternal indicates that the span represents an internal
+ // operation within an application, as opposed to an operation happening at
+ // the boundaries.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer indicates that the span covers server-side handling of an
+ // RPC or other remote network request.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient indicates that the span describes a request to some
+ // remote service.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer indicates that the span describes a producer sending a
+ // message to a broker. Unlike SpanKindClient and SpanKindServer, there is
+ // often no direct critical path latency relationship between producer and
+ // consumer spans. A SpanKindProducer span ends when the message was
+ // accepted by the broker while the logical processing of the message might
+ // span a much longer time.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer indicates that the span describes a consumer receiving
+ // a message from a broker. Like SpanKindProducer, there is often no direct
+ // critical path latency relationship between producer and consumer spans.
+ SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of
+// user-supplied text description and key-value pairs.
+type SpanEvent struct {
+ // time_unix_nano is the time the event occurred.
+ Time time.Time `json:"timeUnixNano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+ t := e.Time.UnixNano()
+ if e.Time.IsZero() || t < 0 {
+ t = 0
+ }
+
+ type Alias SpanEvent
+ return json.Marshal(struct {
+ Alias
+ Time uint64 `json:"timeUnixNano,omitempty"`
+ }{
+ Alias: Alias(e),
+		Time: uint64(t), // nolint: gosec // Non-negative checked above.
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanEvent type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
+ }
+
+ switch key {
+ case "timeUnixNano", "time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ se.Time = time.Unix(0, v)
+ case "name":
+ err = decoder.Decode(&se.Name)
+ case "attributes":
+ err = decoder.Decode(&se.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&se.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanLink is a reference from the current span to another span in the same
+// trace or in a different trace. For example, this can be used in batching
+// operations, where a single batch handler processes multiple requests from
+// different traces or when the handler receives a request from a different
+// project.
+type SpanLink struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `json:"traceState,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanLink type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&sl.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&sl.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&sl.TraceState)
+ case "attributes":
+ err = decoder.Decode(&sl.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&sl.DroppedAttrs)
+ case "flags":
+ err = decoder.Decode(&sl.Flags)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
new file mode 100644
index 000000000..1039bf40c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// StatusCode is the status of a Span.
+//
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+ // StatusCodeUnset is the default status.
+ StatusCodeUnset StatusCode = 0
+ // StatusCodeOK is used when the Span has been validated by an Application
+ // developer or Operator to have completed successfully.
+ StatusCodeOK StatusCode = 1
+ // StatusCodeError is used when the Span contains an error.
+ StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+ "Unset",
+ "OK",
+ "Error",
+}
+
+func (s StatusCode) String() string {
+ if s >= 0 && int(s) < len(statusCodeStrings) {
+ return statusCodeStrings[s]
+ }
+ return ""
+}
+
+// Status defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ // A developer-facing human readable error message.
+ Message string `json:"message,omitempty"`
+ // The status code.
+ Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
new file mode 100644
index 000000000..e5f10767c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Traces represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid TracesData type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resourceSpans", "resource_spans":
+ err = decoder.Decode(&td.ResourceSpans)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ResourceSpans is a collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource Resource `json:"resource"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+ // This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_spans" field, which has its own schema_url field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ResourceSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resource":
+ err = decoder.Decode(&rs.Resource)
+ case "scopeSpans", "scope_spans":
+ err = decoder.Decode(&rs.ScopeSpans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&rs.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ // The instrumentation scope information for the spans in this message.
+	// Semantically when InstrumentationScope isn't set, it is equivalent to
+ // an empty instrumentation scope name (unknown).
+ Scope *Scope `json:"scope"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ScopeSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "scope":
+ err = decoder.Decode(&ss.Scope)
+ case "spans":
+ err = decoder.Decode(&ss.Spans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&ss.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
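The three types above form the OTLP nesting resourceSpans → scopeSpans → spans. A standalone sketch of the JSON shape they decode (parsed generically here so it runs without the internal package; the scope name and schema URL are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A minimal OTLP/JSON traces payload showing the nesting that
// Traces, ResourceSpans, and ScopeSpans decode above.
const payload = `{
  "resourceSpans": [{
    "scopeSpans": [{
      "scope": {"name": "example"},
      "spans": [{"name": "op", "kind": 2}]
    }],
    "schemaUrl": "https://opentelemetry.io/schemas/1.24.0"
  }]
}`

func main() {
	var doc map[string]any
	if err := json.Unmarshal([]byte(payload), &doc); err != nil {
		panic(err)
	}
	rs := doc["resourceSpans"].([]any)
	fmt.Println(len(rs)) // 1
}
```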
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
new file mode 100644
index 000000000..ae9ce102a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -0,0 +1,453 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+ // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+ // then the value of Value is in num as described above. Otherwise, it
+ // contains the value wrapped in the appropriate type.
+ any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for KindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for KindMap Values.
+ mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+ ValueKindEmpty ValueKind = iota
+ ValueKindBool
+ ValueKindFloat64
+ ValueKindInt64
+ ValueKindString
+ ValueKindBytes
+ ValueKindSlice
+ ValueKindMap
+)
+
+var valueKindStrings = []string{
+ "Empty",
+ "Bool",
+ "Float64",
+ "Int64",
+ "String",
+ "Bytes",
+ "Slice",
+ "Map",
+}
+
+func (k ValueKind) String() string {
+ if k >= 0 && int(k) < len(valueKindStrings) {
+ return valueKindStrings[k]
+ }
+ return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ return Value{
+ num: uint64(v), // nolint: gosec // Store raw bytes.
+ any: ValueKindInt64,
+ }
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
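Each constructor above packs the length into num and a raw data pointer into any, mirroring log/slog's Value. The underlying zero-copy trick, in isolation:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "hello"
	ptr := unsafe.StringData(s) // pointer to the string's bytes (the "any" part)
	n := uint64(len(s))         // length stored separately (the "num" part)

	// Rebuild the string from (pointer, length) without copying.
	back := unsafe.String(ptr, n)
	fmt.Println(back == s) // true
}
```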
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ // TODO: error handle
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != ValueKindInt64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != ValueKindBool {
+ // TODO: error handle
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != ValueKindFloat64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// KindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the Value
+// is not KindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the Value
+// is not KindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+ switch x := v.any.(type) {
+ case ValueKind:
+ return x
+ case stringptr:
+ return ValueKindString
+ case bytesptr:
+ return ValueKindBytes
+ case sliceptr:
+ return ValueKindSlice
+ case mapptr:
+ return ValueKindMap
+ default:
+ return ValueKindEmpty
+ }
+}
+
+// Empty returns if v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal returns if v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case ValueKindInt64, ValueKindBool:
+ return v.num == w.num
+ case ValueKindString:
+ return v.asString() == w.asString()
+ case ValueKindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case ValueKindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case ValueKindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, Attr.Equal)
+ case ValueKindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case ValueKindEmpty:
+ return true
+ default:
+ // TODO: error handle
+ return false
+ }
+}
+
+func sortMap(m []Attr) []Attr {
+ sm := make([]Attr, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b Attr) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case ValueKindString:
+ return v.asString()
+ case ValueKindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case ValueKindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case ValueKindBool:
+ return strconv.FormatBool(v.asBool())
+ case ValueKindBytes:
+ return string(v.asBytes())
+ case ValueKindMap:
+ return fmt.Sprint(v.asMap())
+ case ValueKindSlice:
+ return fmt.Sprint(v.asSlice())
+ case ValueKindEmpty:
+ return ""
+ default:
+ // Try to handle this as gracefully as possible.
+ //
+		// Don't panic here. The goal is to have developers find this
+		// first if a Kind is not handled. It is preferable to have
+		// users open issues asking why their attributes have an
+		// "unhandled: " prefix than to have their code panic.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+ }
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+ switch v.Kind() {
+ case ValueKindString:
+ return json.Marshal(struct {
+ Value string `json:"stringValue"`
+ }{v.asString()})
+ case ValueKindInt64:
+ return json.Marshal(struct {
+ Value string `json:"intValue"`
+ }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
+ case ValueKindFloat64:
+ return json.Marshal(struct {
+ Value float64 `json:"doubleValue"`
+ }{v.asFloat64()})
+ case ValueKindBool:
+ return json.Marshal(struct {
+ Value bool `json:"boolValue"`
+ }{v.asBool()})
+ case ValueKindBytes:
+ return json.Marshal(struct {
+ Value []byte `json:"bytesValue"`
+ }{v.asBytes()})
+ case ValueKindMap:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Attr `json:"values"`
+ } `json:"kvlistValue"`
+ }{struct {
+ Values []Attr `json:"values"`
+ }{v.asMap()}})
+ case ValueKindSlice:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Value `json:"values"`
+ } `json:"arrayValue"`
+ }{struct {
+ Values []Value `json:"values"`
+ }{v.asSlice()}})
+ case ValueKindEmpty:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+ }
+}
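The OTLP AnyValue JSON encoding wraps every payload in a single-key object that names its type, with int64 carried as a decimal string. A standalone sketch of the two most common shapes:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	str, _ := json.Marshal(struct {
		Value string `json:"stringValue"`
	}{"hello"})
	fmt.Println(string(str)) // {"stringValue":"hello"}

	// int64 values are serialized as strings so they survive
	// JSON number precision limits (this one exceeds 2^53).
	num, _ := json.Marshal(struct {
		Value string `json:"intValue"`
	}{"9007199254740993"})
	fmt.Println(string(num)) // {"intValue":"9007199254740993"}
}
```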
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Value type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Value key: %#v", keyIface)
+ }
+
+ switch key {
+ case "stringValue", "string_value":
+ var val string
+ err = decoder.Decode(&val)
+ *v = StringValue(val)
+ case "boolValue", "bool_value":
+ var val bool
+ err = decoder.Decode(&val)
+ *v = BoolValue(val)
+ case "intValue", "int_value":
+ var val protoInt64
+ err = decoder.Decode(&val)
+ *v = Int64Value(val.Int64())
+ case "doubleValue", "double_value":
+ var val float64
+ err = decoder.Decode(&val)
+ *v = Float64Value(val)
+ case "bytesValue", "bytes_value":
+ var val64 string
+ if err := decoder.Decode(&val64); err != nil {
+ return err
+ }
+ var val []byte
+ val, err = base64.StdEncoding.DecodeString(val64)
+ *v = BytesValue(val)
+ case "arrayValue", "array_value":
+ var val struct{ Values []Value }
+ err = decoder.Decode(&val)
+ *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value":
+ var val struct{ Values []Attr }
+ err = decoder.Decode(&val)
+ *v = MapValue(val.Values...)
+ default:
+ // Skip unknown.
+ continue
+ }
+ // Use first valid. Ignore the rest.
+ return err
+ }
+
+ // Only unknown fields. Return nil without unmarshaling any value.
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
new file mode 100644
index 000000000..c00221e7b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+ noopSpan
+
+ sc SpanContext
+}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc }
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
new file mode 100644
index 000000000..0f56e4dbb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// NewNoopTracerProvider returns an implementation of TracerProvider that
+// performs no operations. The Tracer and Spans created from the returned
+// TracerProvider also perform no operations.
+//
+// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
+// instead.
+func NewNoopTracerProvider() TracerProvider {
+ return noopTracerProvider{}
+}
+
+type noopTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = noopTracerProvider{}
+
+// Tracer returns noop implementation of Tracer.
+func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+ return noopTracer{}
+}
+
+// noopTracer is an implementation of Tracer that performs no operations.
+type noopTracer struct{ embedded.Tracer }
+
+var _ Tracer = noopTracer{}
+
+// Start carries forward a non-recording Span, if one is present in the context, otherwise it
+// creates a no-op Span.
+func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+ span := SpanFromContext(ctx)
+ if _, ok := span.(nonRecordingSpan); !ok {
+ // span is likely already a noopSpan, but let's be sure
+ span = noopSpanInstance
+ }
+ return ContextWithSpan(ctx, span), span
+}
+
+// noopSpan is an implementation of Span that performs no operations.
+type noopSpan struct{ embedded.Span }
+
+var noopSpanInstance Span = noopSpan{}
+
+// SpanContext returns an empty span context.
+func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
+
+// IsRecording always returns false.
+func (noopSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (noopSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (noopSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (noopSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (noopSpan) End(...SpanEndOption) {}
+
+// RecordError does nothing.
+func (noopSpan) RecordError(error, ...EventOption) {}
+
+// AddEvent does nothing.
+func (noopSpan) AddEvent(string, ...EventOption) {}
+
+// AddLink does nothing.
+func (noopSpan) AddLink(Link) {}
+
+// SetName does nothing.
+func (noopSpan) SetName(string) {}
+
+// TracerProvider returns a no-op TracerProvider.
+func (s noopSpan) TracerProvider() TracerProvider {
+ return s.tracerProvider(autoInstEnabled)
+}
+
+// autoInstEnabled defines if the auto-instrumentation SDK is enabled.
+//
+// The auto-instrumentation is expected to overwrite this value to true when it
+// attaches to the process.
+var autoInstEnabled = new(bool)
+
+// tracerProvider returns a noopTracerProvider if autoEnabled is false,
+// otherwise it will return a TracerProvider from the sdk package used in
+// auto-instrumentation.
+//
+//go:noinline
+func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
+ if *autoEnabled {
+ return newAutoTracerProvider()
+ }
+ return noopTracerProvider{}
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
new file mode 100644
index 000000000..cd382c82a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
@@ -0,0 +1,3 @@
+# Trace Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop)
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
new file mode 100644
index 000000000..64a4f1b36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ trace.TracerProvider = TracerProvider{}
+ _ trace.Tracer = Tracer{}
+ _ trace.Span = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+ return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+ return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+ span := trace.SpanFromContext(ctx)
+
+ // If the parent context contains a non-zero span context, that span
+ // context needs to be returned as a non-recording span
+ // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+ var zeroSC trace.SpanContext
+ if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+ if !span.IsRecording() {
+ // If the span is not recording return it directly.
+ return ctx, span
+ }
+		// Otherwise, return the needed span context in a non-recording span.
+ span = Span{sc: sc}
+ } else {
+ // No parent, return a No-Op span with an empty span context.
+ span = noopSpanInstance
+ }
+ return trace.ContextWithSpan(ctx, span), span
+}
+
+var noopSpanInstance trace.Span = Span{}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+ embedded.Span
+
+ sc trace.SpanContext
+}
+
+// SpanContext returns an empty span context.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (Span) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
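Typical usage of this package; the tracer and span names are arbitrary:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	tp := noop.NewTracerProvider()
	tracer := tp.Tracer("example")

	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	fmt.Println(span.IsRecording())           // false
	fmt.Println(span.SpanContext().IsValid()) // false: empty span context
}
```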
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 000000000..ef85cb70c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides, it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
+ // Tracer returns a unique Tracer scoped to be used by instrumentation code
+ // to trace computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use traces to communicate different domains of
+ // workflow data (i.e. using spans to communicate workflow events only). If
+ // this is the case, the WithScopeAttributes option should be used to
+ // uniquely identify Tracers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Tracer
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Tracer or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Tracer. All implementations will ensure any TracerProvider
+ // configuration changes are propagated to all provided Tracers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Tracer(name string, options ...TracerOption) Tracer
+}
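A common way to follow this advice is a package-level tracer obtained from the globally registered provider via otel.Tracer; the module path below is hypothetical:

```go
package app

import (
	"context"

	"go.opentelemetry.io/otel"
)

// Named after the instrumenting Go package, as recommended above.
var tracer = otel.Tracer("example.com/myapp/app")

// Handle traces one unit of work.
func Handle(ctx context.Context) {
	ctx, span := tracer.Start(ctx, "Handle")
	defer span.End()

	work(ctx) // hypothetical traced work
}

func work(context.Context) {}
```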
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 000000000..d3aa476ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
+ // End completes the Span. The Span is considered complete and ready to be
+ // delivered through the rest of the telemetry pipeline after this method
+ // is called. Therefore, updates to the Span are not allowed after this
+ // method has been called.
+ End(options ...SpanEndOption)
+
+ // AddEvent adds an event with the provided name and options.
+ AddEvent(name string, options ...EventOption)
+
+ // AddLink adds a link.
+ // Adding links at span creation using WithLinks is preferred to calling AddLink
+ // later, for contexts that are available during span creation, because head
+ // sampling decisions can only consider information present during span creation.
+ AddLink(link Link)
+
+ // IsRecording returns the recording state of the Span. It will return
+ // true if the Span is active and events can be recorded.
+ IsRecording() bool
+
+ // RecordError will record err as an exception span event for this span. An
+ // additional call to SetStatus is required if the Status of the Span should
+ // be set to Error, as this method does not change the Span status. If this
+ // span is not being recorded or err is nil then this method does nothing.
+ RecordError(err error, options ...EventOption)
+
+ // SpanContext returns the SpanContext of the Span. The returned SpanContext
+ // is usable even after the End method has been called for the Span.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the Span in the form of a code and a
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
+ SetStatus(code codes.Code, description string)
+
+ // SetName sets the Span name.
+ SetName(name string)
+
+ // SetAttributes sets kv as attributes of the Span. If a key from kv
+ // already exists for an attribute of the Span it will be overwritten with
+ // the value contained in kv.
+ SetAttributes(kv ...attribute.KeyValue)
+
+ // TracerProvider returns a TracerProvider that can be used to generate
+ // additional Spans on the same telemetry pipeline as the current Span.
+ TracerProvider() TracerProvider
+}
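A sketch of the lifecycle these methods imply, including the RecordError plus SetStatus pairing called out above (step is a hypothetical operation):

```go
package app

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// step stands in for the traced operation.
func step(context.Context) error { return errors.New("boom") }

func doWork(ctx context.Context, tracer trace.Tracer) error {
	ctx, span := tracer.Start(ctx, "doWork")
	defer span.End()

	span.SetAttributes(attribute.String("worker.id", "w-1"))

	if err := step(ctx); err != nil {
		// RecordError only records an exception event; the span
		// status must be set separately, as documented above.
		span.RecordError(err)
		span.SetStatus(codes.Error, "step failed")
		return err
	}
	span.SetStatus(codes.Ok, "")
	return nil
}
```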
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+ return Link{
+ SpanContext: SpanContextFromContext(ctx),
+ Attributes: attrs,
+ }
+}
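A sketch of the creation-time linking that the Span.AddLink documentation recommends; itemCtx is assumed to carry the originating trace's SpanContext:

```go
package app

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func processItem(ctx, itemCtx context.Context, tracer trace.Tracer) {
	// Attach the link at span creation (WithLinks) rather than via
	// AddLink, so head samplers can take it into account.
	link := trace.LinkFromContext(itemCtx, attribute.String("origin", "batch"))

	ctx, span := tracer.Start(ctx, "processItem", trace.WithLinks(link))
	defer span.End()

	_ = ctx // hand ctx on to the traced work
}
```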
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+ // SpanKindUnspecified is an unspecified SpanKind and is not a valid
+ // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+ // if it is received.
+ SpanKindUnspecified SpanKind = 0
+ // SpanKindInternal is a SpanKind for a Span that represents an internal
+ // operation within an application.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer is a SpanKind for a Span that represents the operation
+ // of handling a request from a client.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient is a SpanKind for a Span that represents the operation
+ // of client making a request to a server.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer is a SpanKind for a Span that represents the operation
+ // of a producer sending a message to a message broker. Unlike
+ // SpanKindClient and SpanKindServer, there is often no direct
+ // relationship between this kind of Span and a SpanKindConsumer kind. A
+ // SpanKindProducer Span will end once the message is accepted by the
+ // message broker which might not overlap with the processing of that
+ // message.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer is a SpanKind for a Span that represents the operation
+ // of a consumer receiving a message from a message broker. Like
+ // SpanKindProducer Spans, there is often no direct relationship between
+ // this Span and the Span that produced the message.
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
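For example, coercing the zero value:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	k := trace.ValidateSpanKind(trace.SpanKindUnspecified)
	fmt.Println(k == trace.SpanKindInternal) // true
	fmt.Println(k)                           // internal
}
```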
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
new file mode 100644
index 000000000..d49adf671
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -0,0 +1,323 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+)
+
+const (
+ // FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+ // with the sampling bit set means the span is sampled.
+ FlagsSampled = TraceFlags(0x01)
+
+ errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
+
+ errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32"
+ errNilTraceID errorConst = "trace-id can't be all zero"
+
+ errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16"
+ errNilSpanID errorConst = "span-id can't be all zero"
+)
+
+type errorConst string
+
+func (e errorConst) Error() string {
+ return string(e)
+}
+
+// TraceID is a unique identity of a trace.
+// nolint:revive // revive complains about stutter of `trace.TraceID`.
+type TraceID [16]byte
+
+var (
+ nilTraceID TraceID
+ _ json.Marshaler = nilTraceID
+)
+
+// IsValid checks whether the TraceID is valid. A valid trace ID does
+// not consist of zeros only.
+func (t TraceID) IsValid() bool {
+ return !bytes.Equal(t[:], nilTraceID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceID
+// as a hex string.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String returns the hex string representation form of a TraceID.
+func (t TraceID) String() string {
+ return hex.EncodeToString(t[:])
+}
+
+// SpanID is a unique identity of a span in a trace.
+type SpanID [8]byte
+
+var (
+ nilSpanID SpanID
+ _ json.Marshaler = nilSpanID
+)
+
+// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// of zeros only.
+func (s SpanID) IsValid() bool {
+ return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// String returns the hex string representation form of a SpanID.
+func (s SpanID) String() string {
+ return hex.EncodeToString(s[:])
+}
+
+// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification. See more at
+// https://www.w3.org/TR/trace-context/#trace-id
+// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
+func TraceIDFromHex(h string) (TraceID, error) {
+ t := TraceID{}
+ if len(h) != 32 {
+ return t, errInvalidTraceIDLength
+ }
+
+ if err := decodeHex(h, t[:]); err != nil {
+ return t, err
+ }
+
+ if !t.IsValid() {
+ return t, errNilTraceID
+ }
+ return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the w3c trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#parent-id
+func SpanIDFromHex(h string) (SpanID, error) {
+ s := SpanID{}
+ if len(h) != 16 {
+ return s, errInvalidSpanIDLength
+ }
+
+ if err := decodeHex(h, s[:]); err != nil {
+ return s, err
+ }
+
+ if !s.IsValid() {
+ return s, errNilSpanID
+ }
+ return s, nil
+}
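Round-tripping the IDs from the W3C specification's example traceparent header:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	fmt.Println(tid, err) // 4bf92f3577b34da6a3ce929d0e0e4736 <nil>

	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	fmt.Println(sid, err) // 00f067aa0ba902b7 <nil>

	// Uppercase hex is rejected by decodeHex.
	_, err = trace.TraceIDFromHex("4BF92F3577B34DA6A3CE929D0E0E4736")
	fmt.Println(err != nil) // true
}
```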
+
+func decodeHex(h string, b []byte) error {
+ for _, r := range h {
+ switch {
+ case 'a' <= r && r <= 'f':
+ continue
+ case '0' <= r && r <= '9':
+ continue
+ default:
+ return errInvalidHexID
+ }
+ }
+
+ decoded, err := hex.DecodeString(h)
+ if err != nil {
+ return err
+ }
+
+ copy(b, decoded)
+ return nil
+}
+
+// TraceFlags contains flags that can be set on a SpanContext.
+type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
+
+// IsSampled returns if the sampling bit is set in the TraceFlags.
+func (tf TraceFlags) IsSampled() bool {
+ return tf&FlagsSampled == FlagsSampled
+}
+
+// WithSampled sets the sampling bit in a new copy of the TraceFlags.
+func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag.
+ if sampled {
+ return tf | FlagsSampled
+ }
+
+ return tf &^ FlagsSampled
+}
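For example:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	var flags trace.TraceFlags
	flags = flags.WithSampled(true)
	fmt.Println(flags.IsSampled()) // true
	fmt.Println(flags.String())    // 01

	fmt.Println(flags.WithSampled(false).IsSampled()) // false
}
```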
+
+// MarshalJSON implements a custom marshal function to encode TraceFlags
+// as a hex string.
+func (tf TraceFlags) MarshalJSON() ([]byte, error) {
+ return json.Marshal(tf.String())
+}
+
+// String returns the hex string representation form of TraceFlags.
+func (tf TraceFlags) String() string {
+	return hex.EncodeToString([]byte{byte(tf)})
+}
+
+// SpanContextConfig contains mutable fields usable for constructing
+// an immutable SpanContext.
+type SpanContextConfig struct {
+ TraceID TraceID
+ SpanID SpanID
+ TraceFlags TraceFlags
+ TraceState TraceState
+ Remote bool
+}
+
+// NewSpanContext constructs a SpanContext using values from the provided
+// SpanContextConfig.
+func NewSpanContext(config SpanContextConfig) SpanContext {
+ return SpanContext{
+ traceID: config.TraceID,
+ spanID: config.SpanID,
+ traceFlags: config.TraceFlags,
+ traceState: config.TraceState,
+ remote: config.Remote,
+ }
+}
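Constructing a valid, sampled, remote SpanContext from the config (IDs reused from the W3C example):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")

	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
		Remote:     true,
	})
	fmt.Println(sc.IsValid(), sc.IsSampled(), sc.IsRemote()) // true true true
}
```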
+
+// SpanContext contains identifying trace information about a Span.
+type SpanContext struct {
+ traceID TraceID
+ spanID SpanID
+ traceFlags TraceFlags
+ traceState TraceState
+ remote bool
+}
+
+var _ json.Marshaler = SpanContext{}
+
+// IsValid returns if the SpanContext is valid. A valid span context has a
+// valid TraceID and SpanID.
+func (sc SpanContext) IsValid() bool {
+ return sc.HasTraceID() && sc.HasSpanID()
+}
+
+// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+func (sc SpanContext) IsRemote() bool {
+ return sc.remote
+}
+
+// WithRemote returns a copy of sc with the Remote property set to remote.
+func (sc SpanContext) WithRemote(remote bool) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: sc.spanID,
+ traceFlags: sc.traceFlags,
+ traceState: sc.traceState,
+ remote: remote,
+ }
+}
+
+// TraceID returns the TraceID from the SpanContext.
+func (sc SpanContext) TraceID() TraceID {
+ return sc.traceID
+}
+
+// HasTraceID checks if the SpanContext has a valid TraceID.
+func (sc SpanContext) HasTraceID() bool {
+ return sc.traceID.IsValid()
+}
+
+// WithTraceID returns a new SpanContext with the TraceID replaced.
+func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext {
+ return SpanContext{
+ traceID: traceID,
+ spanID: sc.spanID,
+ traceFlags: sc.traceFlags,
+ traceState: sc.traceState,
+ remote: sc.remote,
+ }
+}
+
+// SpanID returns the SpanID from the SpanContext.
+func (sc SpanContext) SpanID() SpanID {
+ return sc.spanID
+}
+
+// HasSpanID checks if the SpanContext has a valid SpanID.
+func (sc SpanContext) HasSpanID() bool {
+ return sc.spanID.IsValid()
+}
+
+// WithSpanID returns a new SpanContext with the SpanID replaced.
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: spanID,
+ traceFlags: sc.traceFlags,
+ traceState: sc.traceState,
+ remote: sc.remote,
+ }
+}
+
+// TraceFlags returns the flags from the SpanContext.
+func (sc SpanContext) TraceFlags() TraceFlags {
+ return sc.traceFlags
+}
+
+// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
+func (sc SpanContext) IsSampled() bool {
+ return sc.traceFlags.IsSampled()
+}
+
+// WithTraceFlags returns a new SpanContext with the TraceFlags replaced.
+func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: sc.spanID,
+ traceFlags: flags,
+ traceState: sc.traceState,
+ remote: sc.remote,
+ }
+}
+
+// TraceState returns the TraceState from the SpanContext.
+func (sc SpanContext) TraceState() TraceState {
+ return sc.traceState
+}
+
+// WithTraceState returns a new SpanContext with the TraceState replaced.
+func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: sc.spanID,
+ traceFlags: sc.traceFlags,
+ traceState: state,
+ remote: sc.remote,
+ }
+}
+
+// Equal is a predicate that determines whether two SpanContext values are equal.
+func (sc SpanContext) Equal(other SpanContext) bool {
+ return sc.traceID == other.traceID &&
+ sc.spanID == other.spanID &&
+ sc.traceFlags == other.traceFlags &&
+ sc.traceState.String() == other.traceState.String() &&
+ sc.remote == other.remote
+}
+
+// MarshalJSON implements a custom marshal function to encode a SpanContext.
+func (sc SpanContext) MarshalJSON() ([]byte, error) {
+ return json.Marshal(SpanContextConfig{
+ TraceID: sc.traceID,
+ SpanID: sc.spanID,
+ TraceFlags: sc.traceFlags,
+ TraceState: sc.traceState,
+ Remote: sc.remote,
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 000000000..77952d2a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Tracer
+
+ // Start creates a span and a context.Context containing the newly-created span.
+ //
+ // If the context.Context provided in `ctx` contains a Span then the newly-created
+ // Span will be a child of that span, otherwise it will be a root span. This behavior
+ // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+ // newly-created Span to be a root span even if `ctx` contains a Span.
+ //
+ // When creating a Span it is recommended to provide all known span attributes using
+ // the `WithAttributes()` SpanOption as samplers will only have access to the
+ // attributes provided when a Span is created.
+ //
+ // Any Span that is created MUST also be ended. This is the responsibility of the user.
+ // Implementations of this API may leak memory or other resources if Spans are not ended.
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
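
A hedged sketch of typical `Tracer` use, assuming the global provider has been configured elsewhere (e.g. via `otel.SetTracerProvider`); the instrumentation name and attribute key are illustrative:

```Go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func operate(ctx context.Context) {
	// Attributes passed at Start are visible to samplers, per the
	// recommendation in the Tracer documentation above.
	tracer := otel.Tracer("example.io/pkg")
	ctx, span := tracer.Start(ctx, "operate",
		trace.WithAttributes(attribute.String("peer.service", "backend")))
	defer span.End() // every created span must be ended

	_ = ctx // child work should use ctx so nested spans parent correctly
}

func main() { operate(context.Background()) }
```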
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
new file mode 100644
index 000000000..dc5e34cad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -0,0 +1,330 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const (
+ maxListMembers = 32
+
+ listDelimiters = ","
+ memberDelimiter = "="
+
+ errInvalidKey errorConst = "invalid tracestate key"
+ errInvalidValue errorConst = "invalid tracestate value"
+ errInvalidMember errorConst = "invalid tracestate list-member"
+ errMemberNumber errorConst = "too many list-members in tracestate"
+ errDuplicate errorConst = "duplicate list-member in tracestate"
+)
+
+type member struct {
+ Key string
+ Value string
+}
+
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E)),
+// which simplifies to (chr = %x20-2B / %x2D-3C / %x3E-7E).
+func checkValueChar(v byte) bool {
+ return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
+func checkValueLast(v byte) bool {
+ return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+// value = (0*255(chr)) nblk-chr
+// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+// chr = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+ n := len(val)
+ if n == 0 || n > 256 {
+ return false
+ }
+ for i := 0; i < n-1; i++ {
+ if !checkValueChar(val[i]) {
+ return false
+ }
+ }
+ return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+ // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ for _, v := range key {
+ if isAlphaNum(byte(v)) {
+ continue
+ }
+ switch v {
+ case '_', '-', '*', '/':
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// according to
+//
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the maximum length of the remaining part: 255 for simple-key, 13 for system-id.
+func checkKeyPart(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ first := key[0] // key's first char
+ ret := len(key[1:]) <= n
+ ret = ret && first >= 'a' && first <= 'z'
+ return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+ if c >= 'a' && c <= 'z' {
+ return true
+ }
+ return c >= '0' && c <= '9'
+}
+
+// according to
+//
+// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the maximum length of the remaining part, which is exactly 240 here.
+func checkKeyTenant(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+// key = simple-key / multi-tenant-key
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// multi-tenant-key = tenant-id "@" system-id
+// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// lcalpha = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+ tenant, system, ok := strings.Cut(key, "@")
+ if !ok {
+ return checkKeyPart(key, 255)
+ }
+ return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
+func newMember(key, value string) (member, error) {
+ if !checkKey(key) {
+ return member{}, errInvalidKey
+ }
+ if !checkValue(value) {
+ return member{}, errInvalidValue
+ }
+ return member{Key: key, Value: value}, nil
+}
+
+func parseMember(m string) (member, error) {
+ key, val, ok := strings.Cut(m, memberDelimiter)
+ if !ok {
+ return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+ }
+ key = strings.TrimLeft(key, " \t")
+ val = strings.TrimRight(val, " \t")
+ result, e := newMember(key, val)
+ if e != nil {
+ return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+ }
+ return result, nil
+}
+
+// String encodes member into a string compliant with the W3C Trace Context
+// specification.
+func (m member) String() string {
+ return m.Key + "=" + m.Value
+}
+
+// TraceState provides additional vendor-specific trace identification
+// information across different distributed tracing systems. It represents an
+immutable list of key/value pairs; each pair is referred to as a
+// list-member.
+//
+// TraceState conforms to the W3C Trace Context specification
+// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
+// a TraceState do so by validating all input and will only produce TraceState
+// that conform to the specification. Specifically, this means that all
+// list-member's key/value pairs are valid, no duplicate list-members exist,
+// and the maximum number of list-members (32) is not exceeded.
+type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
+ // list is the members in order.
+ list []member
+}
+
+var _ json.Marshaler = TraceState{}
+
+// ParseTraceState attempts to decode a TraceState from the passed
+// string. It returns an error if the input is invalid according to the W3C
+// Trace Context specification.
+func ParseTraceState(ts string) (TraceState, error) {
+ if ts == "" {
+ return TraceState{}, nil
+ }
+
+ wrapErr := func(err error) error {
+ return fmt.Errorf("failed to parse tracestate: %w", err)
+ }
+
+ var members []member
+ found := make(map[string]struct{})
+ for ts != "" {
+ var memberStr string
+ memberStr, ts, _ = strings.Cut(ts, listDelimiters)
+ if len(memberStr) == 0 {
+ continue
+ }
+
+ m, err := parseMember(memberStr)
+ if err != nil {
+ return TraceState{}, wrapErr(err)
+ }
+
+ if _, ok := found[m.Key]; ok {
+ return TraceState{}, wrapErr(errDuplicate)
+ }
+ found[m.Key] = struct{}{}
+
+ members = append(members, m)
+ if n := len(members); n > maxListMembers {
+ return TraceState{}, wrapErr(errMemberNumber)
+ }
+ }
+
+ return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+ return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+ if len(ts.list) == 0 {
+ return ""
+ }
+ var n int
+ n += len(ts.list) // member delimiters: '='
+ n += len(ts.list) - 1 // list delimiters: ','
+ for _, mem := range ts.list {
+ n += len(mem.Key)
+ n += len(mem.Value)
+ }
+
+ var sb strings.Builder
+ sb.Grow(n)
+ _, _ = sb.WriteString(ts.list[0].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[0].Value)
+ for i := 1; i < len(ts.list); i++ {
+ _ = sb.WriteByte(listDelimiters[0])
+ _, _ = sb.WriteString(ts.list[i].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[i].Value)
+ }
+ return sb.String()
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+ for _, member := range ts.list {
+ if member.Key == key {
+ return member.Value
+ }
+ }
+
+ return ""
+}
+
+// Walk walks all key/value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+ for _, m := range ts.list {
+ if !f(m.Key, m.Value) {
+ break
+ }
+ }
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than are allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+ m, err := newMember(key, value)
+ if err != nil {
+ return ts, err
+ }
+ n := len(ts.list)
+ found := n
+ for i := range ts.list {
+ if ts.list[i].Key == key {
+ found = i
+ }
+ }
+ cTS := TraceState{}
+ if found == n && n < maxListMembers {
+ cTS.list = make([]member, n+1)
+ } else {
+ cTS.list = make([]member, n)
+ }
+ cTS.list[0] = m
+ // When the number of members exceeds capacity, drop the "right-most".
+ copy(cTS.list[1:], ts.list[0:found])
+ if found < n {
+ copy(cTS.list[1+found:], ts.list[found+1:])
+ }
+ return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+ members := make([]member, ts.Len())
+ copy(members, ts.list)
+ for i, member := range ts.list {
+ if member.Key == key {
+ members = append(members[:i], members[i+1:]...)
+ // TraceState should contain no duplicate members.
+ break
+ }
+ }
+ return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+ return len(ts.list)
+}
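
Putting the pieces above together — a short sketch of parsing, reading, and updating a `TraceState` (the member values are the W3C spec's examples):

```Go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// ParseTraceState validates keys, values, duplicates, and the
	// 32-member limit, as implemented above.
	ts, err := trace.ParseTraceState("congo=t61rcWkgMzE,rojo=00f067aa0ba902b7")
	if err != nil {
		panic(err)
	}

	// Insert returns a copy with the new or updated member moved to the
	// front; ts itself is immutable and unchanged.
	ts2, _ := ts.Insert("congo", "updated")
	fmt.Println(ts2.String())               // congo=updated,rojo=00f067aa0ba902b7
	fmt.Println(ts.Get("congo"), ts2.Len()) // t61rcWkgMzE 2
}
```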
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 000000000..c9b7cdbbf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Check out the previous version of the changelog from the base branch into the temp folder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+ echo "Error: The released sections of the changelog file have been modified."
+ diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+ rm -rf "$TEMP_DIR"
+ false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
new file mode 100644
index 000000000..7afe92b59
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+// Version is the current release version of OpenTelemetry in use.
+func Version() string {
+ return "1.37.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
new file mode 100644
index 000000000..9d4742a17
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -0,0 +1,44 @@
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+module-sets:
+ stable-v1:
+ version: v1.37.0
+ modules:
+ - go.opentelemetry.io/otel
+ - go.opentelemetry.io/otel/bridge/opencensus
+ - go.opentelemetry.io/otel/bridge/opencensus/test
+ - go.opentelemetry.io/otel/bridge/opentracing
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+ - go.opentelemetry.io/otel/exporters/otlp/otlptrace
+ - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+ - go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+ - go.opentelemetry.io/otel/exporters/zipkin
+ - go.opentelemetry.io/otel/metric
+ - go.opentelemetry.io/otel/sdk
+ - go.opentelemetry.io/otel/sdk/metric
+ - go.opentelemetry.io/otel/trace
+ experimental-metrics:
+ version: v0.59.0
+ modules:
+ - go.opentelemetry.io/otel/exporters/prometheus
+ experimental-logs:
+ version: v0.13.0
+ modules:
+ - go.opentelemetry.io/otel/log
+ - go.opentelemetry.io/otel/log/logtest
+ - go.opentelemetry.io/otel/sdk/log
+ - go.opentelemetry.io/otel/sdk/log/logtest
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
+ experimental-schema:
+ version: v0.0.12
+ modules:
+ - go.opentelemetry.io/otel/schema
+excluded-modules:
+ - go.opentelemetry.io/otel/internal/tools
+ - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml
new file mode 100644
index 000000000..7348c50c0
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE
rename to vendor/go.yaml.in/yaml/v2/LICENSE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml
rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
rename to vendor/go.yaml.in/yaml/v2/NOTICE
diff --git a/vendor/go.yaml.in/yaml/v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
new file mode 100644
index 000000000..c9388da42
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *go.yaml.in/yaml/v2*.
+
+To install it, run:
+
+ go get go.yaml.in/yaml/v2
+
+API documentation
+-----------------
+
+See: <https://pkg.go.dev/go.yaml.in/yaml/v2>
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "go.yaml.in/yaml/v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go
rename to vendor/go.yaml.in/yaml/v2/apic.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go
rename to vendor/go.yaml.in/yaml/v2/decode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go
rename to vendor/go.yaml.in/yaml/v2/emitterc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go
rename to vendor/go.yaml.in/yaml/v2/encode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go
rename to vendor/go.yaml.in/yaml/v2/parserc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go
rename to vendor/go.yaml.in/yaml/v2/readerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go
rename to vendor/go.yaml.in/yaml/v2/resolve.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go
rename to vendor/go.yaml.in/yaml/v2/scannerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go
rename to vendor/go.yaml.in/yaml/v2/sorter.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
rename to vendor/go.yaml.in/yaml/v2/writerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
similarity index 99%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
rename to vendor/go.yaml.in/yaml/v2/yaml.go
index 30813884c..5248e1263 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -2,7 +2,7 @@
//
// Source code and other details for the project are available at GitHub:
//
-// https://github.com/go-yaml/yaml
+// https://github.com/yaml/go-yaml
//
package yaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
rename to vendor/go.yaml.in/yaml/v2/yamlh.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 97bd8b06f..db3264da8 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -39,7 +39,7 @@ const (
FrameContinuation FrameType = 0x9
)
-var frameName = map[FrameType]string{
+var frameNames = [...]string{
FrameData: "DATA",
FrameHeaders: "HEADERS",
FramePriority: "PRIORITY",
@@ -53,10 +53,10 @@ var frameName = map[FrameType]string{
}
func (t FrameType) String() string {
- if s, ok := frameName[t]; ok {
- return s
+ if int(t) < len(frameNames) {
+ return frameNames[t]
}
- return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t)
}
// Flags is a bitmask of HTTP/2 flags.
@@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{
// might be 0).
type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
-var frameParsers = map[FrameType]frameParser{
+var frameParsers = [...]frameParser{
FrameData: parseDataFrame,
FrameHeaders: parseHeadersFrame,
FramePriority: parsePriorityFrame,
@@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{
}
func typeFrameParser(t FrameType) frameParser {
- if f := frameParsers[t]; f != nil {
- return f
+ if int(t) < len(frameParsers) {
+ return frameParsers[t]
}
return parseUnknownFrame
}
@@ -509,7 +509,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
}
if fh.Length > fr.maxReadSize {
if fh == invalidHTTP1LookingFrameHeader() {
- return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+ return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
}
return nil, ErrFrameTooLarge
}
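
Both hunks above replace map lookups keyed by `FrameType` with fixed-size arrays: frame types are small, contiguous `uint8` values, so an index plus a bounds check is cheaper than a hash probe. A standalone sketch of the pattern (names here are illustrative, not the http2 package's own):

```Go
package main

import "fmt"

type FrameType uint8

// Dense uint8 keys make an array a drop-in replacement for a map.
var frameNames = [...]string{
	0: "DATA",
	1: "HEADERS",
	2: "PRIORITY",
}

func (t FrameType) String() string {
	if int(t) < len(frameNames) {
		return frameNames[t] // O(1) index, no hashing
	}
	return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
}

func main() {
	fmt.Println(FrameType(1), FrameType(42)) // HEADERS UNKNOWN_FRAME_TYPE_42
}
```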
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
deleted file mode 100644
index dc5225b6d..000000000
--- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go
+++ /dev/null
@@ -1,525 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package timeseries implements a time series structure for stats collection.
-package timeseries // import "golang.org/x/net/internal/timeseries"
-
-import (
- "fmt"
- "log"
- "time"
-)
-
-const (
- timeSeriesNumBuckets = 64
- minuteHourSeriesNumBuckets = 60
-)
-
-var timeSeriesResolutions = []time.Duration{
- 1 * time.Second,
- 10 * time.Second,
- 1 * time.Minute,
- 10 * time.Minute,
- 1 * time.Hour,
- 6 * time.Hour,
- 24 * time.Hour, // 1 day
- 7 * 24 * time.Hour, // 1 week
- 4 * 7 * 24 * time.Hour, // 4 weeks
- 16 * 7 * 24 * time.Hour, // 16 weeks
-}
-
-var minuteHourSeriesResolutions = []time.Duration{
- 1 * time.Second,
- 1 * time.Minute,
-}
-
-// An Observable is a kind of data that can be aggregated in a time series.
-type Observable interface {
- Multiply(ratio float64) // Multiplies the data in self by a given ratio
- Add(other Observable) // Adds the data from a different observation to self
- Clear() // Clears the observation so it can be reused.
- CopyFrom(other Observable) // Copies the contents of a given observation to self
-}
-
-// Float attaches the methods of Observable to a float64.
-type Float float64
-
-// NewFloat returns a Float.
-func NewFloat() Observable {
- f := Float(0)
- return &f
-}
-
-// String returns the float as a string.
-func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
-
-// Value returns the float's value.
-func (f *Float) Value() float64 { return float64(*f) }
-
-func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
-
-func (f *Float) Add(other Observable) {
- o := other.(*Float)
- *f += *o
-}
-
-func (f *Float) Clear() { *f = 0 }
-
-func (f *Float) CopyFrom(other Observable) {
- o := other.(*Float)
- *f = *o
-}
-
-// A Clock tells the current time.
-type Clock interface {
- Time() time.Time
-}
-
-type defaultClock int
-
-var defaultClockInstance defaultClock
-
-func (defaultClock) Time() time.Time { return time.Now() }
-
-// Information kept per level. Each level consists of a circular list of
-// observations. The start of the level may be derived from end and the
-// len(buckets) * sizeInMillis.
-type tsLevel struct {
- oldest int // index to oldest bucketed Observable
- newest int // index to newest bucketed Observable
- end time.Time // end timestamp for this level
- size time.Duration // duration of the bucketed Observable
- buckets []Observable // collections of observations
- provider func() Observable // used for creating new Observable
-}
-
-func (l *tsLevel) Clear() {
- l.oldest = 0
- l.newest = len(l.buckets) - 1
- l.end = time.Time{}
- for i := range l.buckets {
- if l.buckets[i] != nil {
- l.buckets[i].Clear()
- l.buckets[i] = nil
- }
- }
-}
-
-func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
- l.size = size
- l.provider = f
- l.buckets = make([]Observable, numBuckets)
-}
-
-// Keeps a sequence of levels. Each level is responsible for storing data at
-// a given resolution. For example, the first level stores data at a one
-// minute resolution while the second level stores data at a one hour
-// resolution.
-
-// Each level is represented by a sequence of buckets. Each bucket spans an
-// interval equal to the resolution of the level. New observations are added
-// to the last bucket.
-type timeSeries struct {
- provider func() Observable // make more Observable
- numBuckets int // number of buckets in each level
- levels []*tsLevel // levels of bucketed Observable
- lastAdd time.Time // time of last Observable tracked
- total Observable // convenient aggregation of all Observable
- clock Clock // Clock for getting current time
- pending Observable // observations not yet bucketed
- pendingTime time.Time // what time are we keeping in pending
- dirty bool // if there are pending observations
-}
-
-// init initializes a level according to the supplied criteria.
-func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
- ts.provider = f
- ts.numBuckets = numBuckets
- ts.clock = clock
- ts.levels = make([]*tsLevel, len(resolutions))
-
- for i := range resolutions {
- if i > 0 && resolutions[i-1] >= resolutions[i] {
- log.Print("timeseries: resolutions must be monotonically increasing")
- break
- }
- newLevel := new(tsLevel)
- newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
- ts.levels[i] = newLevel
- }
-
- ts.Clear()
-}
-
-// Clear removes all observations from the time series.
-func (ts *timeSeries) Clear() {
- ts.lastAdd = time.Time{}
- ts.total = ts.resetObservation(ts.total)
- ts.pending = ts.resetObservation(ts.pending)
- ts.pendingTime = time.Time{}
- ts.dirty = false
-
- for i := range ts.levels {
- ts.levels[i].Clear()
- }
-}
-
-// Add records an observation at the current time.
-func (ts *timeSeries) Add(observation Observable) {
- ts.AddWithTime(observation, ts.clock.Time())
-}
-
-// AddWithTime records an observation at the specified time.
-func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
-
- smallBucketDuration := ts.levels[0].size
-
- if t.After(ts.lastAdd) {
- ts.lastAdd = t
- }
-
- if t.After(ts.pendingTime) {
- ts.advance(t)
- ts.mergePendingUpdates()
- ts.pendingTime = ts.levels[0].end
- ts.pending.CopyFrom(observation)
- ts.dirty = true
- } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
- // The observation is close enough to go into the pending bucket.
- // This compensates for clock skewing and small scheduling delays
- // by letting the update stay in the fast path.
- ts.pending.Add(observation)
- ts.dirty = true
- } else {
- ts.mergeValue(observation, t)
- }
-}
-
-// mergeValue inserts the observation at the specified time in the past into all levels.
-func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
- for _, level := range ts.levels {
- index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
- if 0 <= index && index < ts.numBuckets {
- bucketNumber := (level.oldest + index) % ts.numBuckets
- if level.buckets[bucketNumber] == nil {
- level.buckets[bucketNumber] = level.provider()
- }
- level.buckets[bucketNumber].Add(observation)
- }
- }
- ts.total.Add(observation)
-}
-
-// mergePendingUpdates applies the pending updates into all levels.
-func (ts *timeSeries) mergePendingUpdates() {
- if ts.dirty {
- ts.mergeValue(ts.pending, ts.pendingTime)
- ts.pending = ts.resetObservation(ts.pending)
- ts.dirty = false
- }
-}
-
-// advance cycles the buckets at each level until the latest bucket in
-// each level can hold the time specified.
-func (ts *timeSeries) advance(t time.Time) {
- if !t.After(ts.levels[0].end) {
- return
- }
- for i := 0; i < len(ts.levels); i++ {
- level := ts.levels[i]
- if !level.end.Before(t) {
- break
- }
-
- // If the time is sufficiently far, just clear the level and advance
- // directly.
- if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
- for _, b := range level.buckets {
- ts.resetObservation(b)
- }
- level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
- }
-
- for t.After(level.end) {
- level.end = level.end.Add(level.size)
- level.newest = level.oldest
- level.oldest = (level.oldest + 1) % ts.numBuckets
- ts.resetObservation(level.buckets[level.newest])
- }
-
- t = level.end
- }
-}
-
-// Latest returns the sum of the num latest buckets from the level.
-func (ts *timeSeries) Latest(level, num int) Observable {
- now := ts.clock.Time()
- if ts.levels[0].end.Before(now) {
- ts.advance(now)
- }
-
- ts.mergePendingUpdates()
-
- result := ts.provider()
- l := ts.levels[level]
- index := l.newest
-
- for i := 0; i < num; i++ {
- if l.buckets[index] != nil {
- result.Add(l.buckets[index])
- }
- if index == 0 {
- index = ts.numBuckets
- }
- index--
- }
-
- return result
-}
-
-// LatestBuckets returns a copy of the num latest buckets from level.
-func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
- if level < 0 || level > len(ts.levels) {
- log.Print("timeseries: bad level argument: ", level)
- return nil
- }
- if num < 0 || num >= ts.numBuckets {
- log.Print("timeseries: bad num argument: ", num)
- return nil
- }
-
- results := make([]Observable, num)
- now := ts.clock.Time()
- if ts.levels[0].end.Before(now) {
- ts.advance(now)
- }
-
- ts.mergePendingUpdates()
-
- l := ts.levels[level]
- index := l.newest
-
- for i := 0; i < num; i++ {
- result := ts.provider()
- results[i] = result
- if l.buckets[index] != nil {
- result.CopyFrom(l.buckets[index])
- }
-
- if index == 0 {
- index = ts.numBuckets
- }
- index -= 1
- }
- return results
-}
-
-// ScaleBy updates observations by scaling by factor.
-func (ts *timeSeries) ScaleBy(factor float64) {
- for _, l := range ts.levels {
- for i := 0; i < ts.numBuckets; i++ {
- l.buckets[i].Multiply(factor)
- }
- }
-
- ts.total.Multiply(factor)
- ts.pending.Multiply(factor)
-}
-
-// Range returns the sum of observations added over the specified time range.
-// If start or finish times don't fall on bucket boundaries of the same
-// level, then return values are approximate answers.
-func (ts *timeSeries) Range(start, finish time.Time) Observable {
- return ts.ComputeRange(start, finish, 1)[0]
-}
-
-// Recent returns the sum of observations from the last delta.
-func (ts *timeSeries) Recent(delta time.Duration) Observable {
- now := ts.clock.Time()
- return ts.Range(now.Add(-delta), now)
-}
-
-// Total returns the total of all observations.
-func (ts *timeSeries) Total() Observable {
- ts.mergePendingUpdates()
- return ts.total
-}
-
-// ComputeRange computes a specified number of values into a slice using
-// the observations recorded over the specified time period. The return
-// values are approximate if the start or finish times don't fall on the
-// bucket boundaries at the same level or if the number of buckets spanning
-// the range is not an integral multiple of num.
-func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
- if start.After(finish) {
- log.Printf("timeseries: start > finish, %v>%v", start, finish)
- return nil
- }
-
- if num < 0 {
- log.Printf("timeseries: num < 0, %v", num)
- return nil
- }
-
- results := make([]Observable, num)
-
- for _, l := range ts.levels {
- if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
- ts.extract(l, start, finish, num, results)
- return results
- }
- }
-
- // Failed to find a level that covers the desired range. So just
- // extract from the last level, even if it doesn't cover the entire
- // desired range.
- ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
-
- return results
-}
-
-// RecentList returns the specified number of values in slice over the most
-// recent time period of the specified range.
-func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
- if delta < 0 {
- return nil
- }
- now := ts.clock.Time()
- return ts.ComputeRange(now.Add(-delta), now, num)
-}
-
-// extract returns a slice of specified number of observations from a given
-// level over a given range.
-func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
- ts.mergePendingUpdates()
-
- srcInterval := l.size
- dstInterval := finish.Sub(start) / time.Duration(num)
- dstStart := start
- srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
-
- srcIndex := 0
-
- // Where should scanning start?
- if dstStart.After(srcStart) {
- advance := int(dstStart.Sub(srcStart) / srcInterval)
- srcIndex += advance
- srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
- }
-
-	// The i'th value is computed as shown below.
-	// interval = (finish-start)/num
- // i'th value = sum of observation in range
- // [ start + i * interval,
- // start + (i + 1) * interval )
- for i := 0; i < num; i++ {
- results[i] = ts.resetObservation(results[i])
- dstEnd := dstStart.Add(dstInterval)
- for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
- srcEnd := srcStart.Add(srcInterval)
- if srcEnd.After(ts.lastAdd) {
- srcEnd = ts.lastAdd
- }
-
- if !srcEnd.Before(dstStart) {
- srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
- if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
- // dst completely contains src.
- if srcValue != nil {
- results[i].Add(srcValue)
- }
- } else {
- // dst partially overlaps src.
- overlapStart := maxTime(srcStart, dstStart)
- overlapEnd := minTime(srcEnd, dstEnd)
- base := srcEnd.Sub(srcStart)
- fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
-
- used := ts.provider()
- if srcValue != nil {
- used.CopyFrom(srcValue)
- }
- used.Multiply(fraction)
- results[i].Add(used)
- }
-
- if srcEnd.After(dstEnd) {
- break
- }
- }
- srcIndex++
- srcStart = srcStart.Add(srcInterval)
- }
- dstStart = dstStart.Add(dstInterval)
- }
-}
-
-// resetObservation clears the content so the struct may be reused.
-func (ts *timeSeries) resetObservation(observation Observable) Observable {
- if observation == nil {
- observation = ts.provider()
- } else {
- observation.Clear()
- }
- return observation
-}
-
-// TimeSeries tracks data at granularities from 1 second to 16 weeks.
-type TimeSeries struct {
- timeSeries
-}
-
-// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
-func NewTimeSeries(f func() Observable) *TimeSeries {
- return NewTimeSeriesWithClock(f, defaultClockInstance)
-}
-
-// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
-// assigning timestamps.
-func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
- ts := new(TimeSeries)
- ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
- return ts
-}
-
-// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
-type MinuteHourSeries struct {
- timeSeries
-}
-
-// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
-func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
- return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
-}
-
-// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
-// assigning timestamps.
-func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
- ts := new(MinuteHourSeries)
- ts.timeSeries.init(minuteHourSeriesResolutions, f,
- minuteHourSeriesNumBuckets, clock)
- return ts
-}
-
-func (ts *MinuteHourSeries) Minute() Observable {
- return ts.timeSeries.Latest(0, 60)
-}
-
-func (ts *MinuteHourSeries) Hour() Observable {
- return ts.timeSeries.Latest(1, 60)
-}
-
-func minTime(a, b time.Time) time.Time {
- if a.Before(b) {
- return a
- }
- return b
-}
-
-func maxTime(a, b time.Time) time.Time {
- if a.After(b) {
- return a
- }
- return b
-}
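
The deleted package kept one ring of `numBuckets` buckets per resolution level, advanced lazily as observations arrived. A self-contained sketch of that core idea — a single level with plain float observations, simplified from the `tsLevel`/`Latest` machinery above:

```Go
package main

import (
	"fmt"
	"time"
)

// ring is a single-resolution level: fixed buckets, each spanning size,
// cycled forward as time passes — the same shape as the deleted tsLevel.
type ring struct {
	size    time.Duration
	end     time.Time // end of the newest bucket's interval
	newest  int
	buckets []float64
}

func newRing(size time.Duration, n int) *ring {
	return &ring{size: size, buckets: make([]float64, n)}
}

func (r *ring) add(t time.Time, v float64) {
	// Cycle buckets until the newest bucket's interval contains t,
	// reusing the oldest bucket each step (cf. timeSeries.advance).
	for t.After(r.end) {
		r.end = r.end.Add(r.size)
		r.newest = (r.newest + 1) % len(r.buckets)
		r.buckets[r.newest] = 0
	}
	r.buckets[r.newest] += v
}

// latest sums the num most recent buckets, mirroring timeSeries.Latest.
func (r *ring) latest(num int) float64 {
	var sum float64
	i := r.newest
	for ; num > 0; num-- {
		sum += r.buckets[i]
		if i == 0 {
			i = len(r.buckets)
		}
		i--
	}
	return sum
}

func main() {
	r := newRing(time.Second, 64)
	r.end = time.Now()
	r.add(time.Now(), 1)
	r.add(time.Now().Add(2*time.Second), 3)
	fmt.Println(r.latest(5)) // 4: both observations fall in the window
}
```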
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
deleted file mode 100644
index c646a6952..000000000
--- a/vendor/golang.org/x/net/trace/events.go
+++ /dev/null
@@ -1,532 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package trace
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "io"
- "log"
- "net/http"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "text/tabwriter"
- "time"
-)
-
-const maxEventsPerLog = 100
-
-type bucket struct {
- MaxErrAge time.Duration
- String string
-}
-
-var buckets = []bucket{
- {0, "total"},
- {10 * time.Second, "errs<10s"},
- {1 * time.Minute, "errs<1m"},
- {10 * time.Minute, "errs<10m"},
- {1 * time.Hour, "errs<1h"},
- {10 * time.Hour, "errs<10h"},
- {24000 * time.Hour, "errors"},
-}
-
-// RenderEvents renders the HTML page typically served at /debug/events.
-// It does not do any auth checking. The request may be nil.
-//
-// Most users will use the Events handler.
-func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
- now := time.Now()
- data := &struct {
- Families []string // family names
- Buckets []bucket
- Counts [][]int // eventLog count per family/bucket
-
- // Set when a bucket has been selected.
- Family string
- Bucket int
- EventLogs eventLogs
- Expanded bool
- }{
- Buckets: buckets,
- }
-
- data.Families = make([]string, 0, len(families))
- famMu.RLock()
- for name := range families {
- data.Families = append(data.Families, name)
- }
- famMu.RUnlock()
- sort.Strings(data.Families)
-
- // Count the number of eventLogs in each family for each error age.
- data.Counts = make([][]int, len(data.Families))
- for i, name := range data.Families {
- // TODO(sameer): move this loop under the family lock.
- f := getEventFamily(name)
- data.Counts[i] = make([]int, len(data.Buckets))
- for j, b := range data.Buckets {
- data.Counts[i][j] = f.Count(now, b.MaxErrAge)
- }
- }
-
- if req != nil {
- var ok bool
- data.Family, data.Bucket, ok = parseEventsArgs(req)
- if !ok {
- // No-op
- } else {
- data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
- }
- if data.EventLogs != nil {
- defer data.EventLogs.Free()
- sort.Sort(data.EventLogs)
- }
- if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
- data.Expanded = exp
- }
- }
-
- famMu.RLock()
- defer famMu.RUnlock()
- if err := eventsTmpl().Execute(w, data); err != nil {
- log.Printf("net/trace: Failed executing template: %v", err)
- }
-}
-
-func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
- fam, bStr := req.FormValue("fam"), req.FormValue("b")
- if fam == "" || bStr == "" {
- return "", 0, false
- }
- b, err := strconv.Atoi(bStr)
- if err != nil || b < 0 || b >= len(buckets) {
- return "", 0, false
- }
- return fam, b, true
-}
-
-// An EventLog provides a log of events associated with a specific object.
-type EventLog interface {
- // Printf formats its arguments with fmt.Sprintf and adds the
- // result to the event log.
- Printf(format string, a ...interface{})
-
- // Errorf is like Printf, but it marks this event as an error.
- Errorf(format string, a ...interface{})
-
- // Finish declares that this event log is complete.
- // The event log should not be used after calling this method.
- Finish()
-}
-
-// NewEventLog returns a new EventLog with the specified family name
-// and title.
-func NewEventLog(family, title string) EventLog {
- el := newEventLog()
- el.ref()
- el.Family, el.Title = family, title
- el.Start = time.Now()
- el.events = make([]logEntry, 0, maxEventsPerLog)
- el.stack = make([]uintptr, 32)
- n := runtime.Callers(2, el.stack)
- el.stack = el.stack[:n]
-
- getEventFamily(family).add(el)
- return el
-}
-
-func (el *eventLog) Finish() {
- getEventFamily(el.Family).remove(el)
- el.unref() // matches ref in New
-}
-
-var (
- famMu sync.RWMutex
- families = make(map[string]*eventFamily) // family name => family
-)
-
-func getEventFamily(fam string) *eventFamily {
- famMu.Lock()
- defer famMu.Unlock()
- f := families[fam]
- if f == nil {
- f = &eventFamily{}
- families[fam] = f
- }
- return f
-}
-
-type eventFamily struct {
- mu sync.RWMutex
- eventLogs eventLogs
-}
-
-func (f *eventFamily) add(el *eventLog) {
- f.mu.Lock()
- f.eventLogs = append(f.eventLogs, el)
- f.mu.Unlock()
-}
-
-func (f *eventFamily) remove(el *eventLog) {
- f.mu.Lock()
- defer f.mu.Unlock()
- for i, el0 := range f.eventLogs {
- if el == el0 {
- copy(f.eventLogs[i:], f.eventLogs[i+1:])
- f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
- return
- }
- }
-}
-
-func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
- f.mu.RLock()
- defer f.mu.RUnlock()
- for _, el := range f.eventLogs {
- if el.hasRecentError(now, maxErrAge) {
- n++
- }
- }
- return
-}
-
-func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
- f.mu.RLock()
- defer f.mu.RUnlock()
- els = make(eventLogs, 0, len(f.eventLogs))
- for _, el := range f.eventLogs {
- if el.hasRecentError(now, maxErrAge) {
- el.ref()
- els = append(els, el)
- }
- }
- return
-}
-
-type eventLogs []*eventLog
-
-// Free calls unref on each element of the list.
-func (els eventLogs) Free() {
- for _, el := range els {
- el.unref()
- }
-}
-
-// eventLogs may be sorted in reverse chronological order.
-func (els eventLogs) Len() int { return len(els) }
-func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
-func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
-
-// A logEntry is a timestamped log entry in an event log.
-type logEntry struct {
- When time.Time
- Elapsed time.Duration // since previous event in log
- NewDay bool // whether this event is on a different day to the previous event
- What string
- IsErr bool
-}
-
-// WhenString returns a string representation of the elapsed time of the event.
-// It will include the date if midnight was crossed.
-func (e logEntry) WhenString() string {
- if e.NewDay {
- return e.When.Format("2006/01/02 15:04:05.000000")
- }
- return e.When.Format("15:04:05.000000")
-}
-
-// An eventLog represents an active event log.
-type eventLog struct {
- // Family is the top-level grouping of event logs to which this belongs.
- Family string
-
- // Title is the title of this event log.
- Title string
-
- // Timing information.
- Start time.Time
-
- // Call stack where this event log was created.
- stack []uintptr
-
- // Append-only sequence of events.
- //
- // TODO(sameer): change this to a ring buffer to avoid the array copy
- // when we hit maxEventsPerLog.
- mu sync.RWMutex
- events []logEntry
- LastErrorTime time.Time
- discarded int
-
- refs int32 // how many buckets this is in
-}
-
-func (el *eventLog) reset() {
- // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
- el.Family = ""
- el.Title = ""
- el.Start = time.Time{}
- el.stack = nil
- el.events = nil
- el.LastErrorTime = time.Time{}
- el.discarded = 0
- el.refs = 0
-}
-
-func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
- if maxErrAge == 0 {
- return true
- }
- el.mu.RLock()
- defer el.mu.RUnlock()
- return now.Sub(el.LastErrorTime) < maxErrAge
-}
-
-// delta returns the elapsed time since the last event or the log start,
-// and whether it spans midnight.
-// L >= el.mu
-func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
- if len(el.events) == 0 {
- return t.Sub(el.Start), false
- }
- prev := el.events[len(el.events)-1].When
- return t.Sub(prev), prev.Day() != t.Day()
-
-}
-
-func (el *eventLog) Printf(format string, a ...interface{}) {
- el.printf(false, format, a...)
-}
-
-func (el *eventLog) Errorf(format string, a ...interface{}) {
- el.printf(true, format, a...)
-}
-
-func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
- e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
- el.mu.Lock()
- e.Elapsed, e.NewDay = el.delta(e.When)
- if len(el.events) < maxEventsPerLog {
- el.events = append(el.events, e)
- } else {
- // Discard the oldest event.
- if el.discarded == 0 {
- // el.discarded starts at two to count for the event it
- // is replacing, plus the next one that we are about to
- // drop.
- el.discarded = 2
- } else {
- el.discarded++
- }
- // TODO(sameer): if this causes allocations on a critical path,
- // change eventLog.What to be a fmt.Stringer, as in trace.go.
- el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
- // The timestamp of the discarded meta-event should be
- // the time of the last event it is representing.
- el.events[0].When = el.events[1].When
- copy(el.events[1:], el.events[2:])
- el.events[maxEventsPerLog-1] = e
- }
- if e.IsErr {
- el.LastErrorTime = e.When
- }
- el.mu.Unlock()
-}
-
-func (el *eventLog) ref() {
- atomic.AddInt32(&el.refs, 1)
-}
-
-func (el *eventLog) unref() {
- if atomic.AddInt32(&el.refs, -1) == 0 {
- freeEventLog(el)
- }
-}
-
-func (el *eventLog) When() string {
- return el.Start.Format("2006/01/02 15:04:05.000000")
-}
-
-func (el *eventLog) ElapsedTime() string {
- elapsed := time.Since(el.Start)
- return fmt.Sprintf("%.6f", elapsed.Seconds())
-}
-
-func (el *eventLog) Stack() string {
- buf := new(bytes.Buffer)
- tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
- printStackRecord(tw, el.stack)
- tw.Flush()
- return buf.String()
-}
-
-// printStackRecord prints the function + source line information
-// for a single stack trace.
-// Adapted from runtime/pprof/pprof.go.
-func printStackRecord(w io.Writer, stk []uintptr) {
- for _, pc := range stk {
- f := runtime.FuncForPC(pc)
- if f == nil {
- continue
- }
- file, line := f.FileLine(pc)
- name := f.Name()
- // Hide runtime.goexit and any runtime functions at the beginning.
- if strings.HasPrefix(name, "runtime.") {
- continue
- }
- fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
- }
-}
-
-func (el *eventLog) Events() []logEntry {
- el.mu.RLock()
- defer el.mu.RUnlock()
- return el.events
-}
-
-// freeEventLogs is a freelist of *eventLog
-var freeEventLogs = make(chan *eventLog, 1000)
-
-// newEventLog returns an event log ready to use.
-func newEventLog() *eventLog {
- select {
- case el := <-freeEventLogs:
- return el
- default:
- return new(eventLog)
- }
-}
-
-// freeEventLog adds el to freeEventLogs if there's room.
-// This is non-blocking.
-func freeEventLog(el *eventLog) {
- el.reset()
- select {
- case freeEventLogs <- el:
- default:
- }
-}
-
-var eventsTmplCache *template.Template
-var eventsTmplOnce sync.Once
-
-func eventsTmpl() *template.Template {
- eventsTmplOnce.Do(func() {
- eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
- "elapsed": elapsed,
- "trimSpace": strings.TrimSpace,
- }).Parse(eventsHTML))
- })
- return eventsTmplCache
-}
-
-const eventsHTML = `
-
-
- events
-
-
-
-
-/debug/events
-
-
-
-{{if $.EventLogs}}
-
-Family: {{$.Family}}
-
-{{if $.Expanded}}{{end}}
-[Summary]{{if $.Expanded}}{{end}}
-
-{{if not $.Expanded}}{{end}}
-[Expanded]{{if not $.Expanded}}{{end}}
-
-
- | When | Elapsed |
- {{range $el := $.EventLogs}}
-
- | {{$el.When}} |
- {{$el.ElapsedTime}} |
- {{$el.Title}}
- |
- {{if $.Expanded}}
-
- |
- |
- {{$el.Stack|trimSpace}} |
-
- {{range $el.Events}}
-
- | {{.WhenString}} |
- {{elapsed .Elapsed}} |
- .{{if .IsErr}}E{{else}}.{{end}}. {{.What}} |
-
- {{end}}
- {{end}}
- {{end}}
-
-{{end}}
-
-
-`
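
For reference, the public surface of this deleted file was the `EventLog` interface shown above (`Printf`, `Errorf`, `Finish`). A sketch of how callers used it, against `golang.org/x/net/trace` as it existed prior to this removal:

```Go
package main

import (
	"errors"

	"golang.org/x/net/trace"
)

func dial(addr string) error { return errors.New("connection refused") }

func main() {
	el := trace.NewEventLog("mypkg.Conn", "db-connection")
	defer el.Finish() // the log must not be used after Finish

	addr := "10.0.0.1:5432"
	el.Printf("dialing %s", addr)
	if err := dial(addr); err != nil {
		el.Errorf("dial failed: %v", err) // flags the entry as an error
	}
}
```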
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
deleted file mode 100644
index d6c71101e..000000000
--- a/vendor/golang.org/x/net/trace/histogram.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package trace
-
-// This file implements histogramming for RPC statistics collection.
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "log"
- "math"
- "sync"
-
- "golang.org/x/net/internal/timeseries"
-)
-
-const (
- bucketCount = 38
-)
-
-// histogram keeps counts of values in buckets that are spaced
-// out in powers of 2: 0-1, 2-3, 4-7...
-// histogram implements timeseries.Observable
-type histogram struct {
- sum int64 // running total of measurements
- sumOfSquares float64 // square of running total
- buckets []int64 // bucketed values for histogram
- value int // holds a single value as an optimization
- valueCount int64 // number of values recorded for single value
-}
-
-// addMeasurement records a value measurement observation to the histogram.
-func (h *histogram) addMeasurement(value int64) {
- // TODO: assert invariant
- h.sum += value
- h.sumOfSquares += float64(value) * float64(value)
-
- bucketIndex := getBucket(value)
-
- if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
- h.value = bucketIndex
- h.valueCount++
- } else {
- h.allocateBuckets()
- h.buckets[bucketIndex]++
- }
-}
-
-func (h *histogram) allocateBuckets() {
- if h.buckets == nil {
- h.buckets = make([]int64, bucketCount)
- h.buckets[h.value] = h.valueCount
- h.value = 0
- h.valueCount = -1
- }
-}
-
-func log2(i int64) int {
- n := 0
- for ; i >= 0x100; i >>= 8 {
- n += 8
- }
- for ; i > 0; i >>= 1 {
- n += 1
- }
- return n
-}
-
-func getBucket(i int64) (index int) {
- index = log2(i) - 1
- if index < 0 {
- index = 0
- }
- if index >= bucketCount {
- index = bucketCount - 1
- }
- return
-}
-
-// Total returns the number of recorded observations.
-func (h *histogram) total() (total int64) {
- if h.valueCount >= 0 {
- total = h.valueCount
- }
- for _, val := range h.buckets {
- total += int64(val)
- }
- return
-}
-
-// Average returns the average value of recorded observations.
-func (h *histogram) average() float64 {
- t := h.total()
- if t == 0 {
- return 0
- }
- return float64(h.sum) / float64(t)
-}
-
-// Variance returns the variance of recorded observations.
-func (h *histogram) variance() float64 {
- t := float64(h.total())
- if t == 0 {
- return 0
- }
- s := float64(h.sum) / t
- return h.sumOfSquares/t - s*s
-}
-
-// StandardDeviation returns the standard deviation of recorded observations.
-func (h *histogram) standardDeviation() float64 {
- return math.Sqrt(h.variance())
-}
-
-// PercentileBoundary estimates the value that the given fraction of recorded
-// observations are less than.
-func (h *histogram) percentileBoundary(percentile float64) int64 {
- total := h.total()
-
- // Corner cases (make sure result is strictly less than Total())
- if total == 0 {
- return 0
- } else if total == 1 {
- return int64(h.average())
- }
-
- percentOfTotal := round(float64(total) * percentile)
- var runningTotal int64
-
- for i := range h.buckets {
- value := h.buckets[i]
- runningTotal += value
- if runningTotal == percentOfTotal {
- // We hit an exact bucket boundary. If the next bucket has data, it is a
- // good estimate of the value. If the bucket is empty, we interpolate the
- // midpoint between the next bucket's boundary and the next non-zero
- // bucket. If the remaining buckets are all empty, then we use the
- // boundary for the next bucket as the estimate.
- j := uint8(i + 1)
- min := bucketBoundary(j)
- if runningTotal < total {
- for h.buckets[j] == 0 {
- j++
- }
- }
- max := bucketBoundary(j)
- return min + round(float64(max-min)/2)
- } else if runningTotal > percentOfTotal {
- // The value is in this bucket. Interpolate the value.
- delta := runningTotal - percentOfTotal
- percentBucket := float64(value-delta) / float64(value)
- bucketMin := bucketBoundary(uint8(i))
- nextBucketMin := bucketBoundary(uint8(i + 1))
- bucketSize := nextBucketMin - bucketMin
- return bucketMin + round(percentBucket*float64(bucketSize))
- }
- }
- return bucketBoundary(bucketCount - 1)
-}
-
-// Median returns the estimated median of the observed values.
-func (h *histogram) median() int64 {
- return h.percentileBoundary(0.5)
-}
-
-// Add adds other to h.
-func (h *histogram) Add(other timeseries.Observable) {
- o := other.(*histogram)
- if o.valueCount == 0 {
- // Other histogram is empty
- } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
- // Both have a single bucketed value, aggregate them
- h.valueCount += o.valueCount
- } else {
- // Two different values necessitate buckets in this histogram
- h.allocateBuckets()
- if o.valueCount >= 0 {
- h.buckets[o.value] += o.valueCount
- } else {
- for i := range h.buckets {
- h.buckets[i] += o.buckets[i]
- }
- }
- }
- h.sumOfSquares += o.sumOfSquares
- h.sum += o.sum
-}
-
-// Clear resets the histogram to an empty state, removing all observed values.
-func (h *histogram) Clear() {
- h.buckets = nil
- h.value = 0
- h.valueCount = 0
- h.sum = 0
- h.sumOfSquares = 0
-}
-
-// CopyFrom copies from other, which must be a *histogram, into h.
-func (h *histogram) CopyFrom(other timeseries.Observable) {
- o := other.(*histogram)
- if o.valueCount == -1 {
- h.allocateBuckets()
- copy(h.buckets, o.buckets)
- }
- h.sum = o.sum
- h.sumOfSquares = o.sumOfSquares
- h.value = o.value
- h.valueCount = o.valueCount
-}
-
-// Multiply scales the histogram by the specified ratio.
-func (h *histogram) Multiply(ratio float64) {
- if h.valueCount == -1 {
- for i := range h.buckets {
- h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
- }
- } else {
- h.valueCount = int64(float64(h.valueCount) * ratio)
- }
- h.sum = int64(float64(h.sum) * ratio)
- h.sumOfSquares = h.sumOfSquares * ratio
-}
-
-// New creates a new histogram.
-func (h *histogram) New() timeseries.Observable {
- r := new(histogram)
- r.Clear()
- return r
-}
-
-func (h *histogram) String() string {
- return fmt.Sprintf("%d, %f, %d, %d, %v",
- h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
-}
-
-// round returns the closest int64 to the argument
-func round(in float64) int64 {
- return int64(math.Floor(in + 0.5))
-}
-
-// bucketBoundary returns the first value in the bucket.
-func bucketBoundary(bucket uint8) int64 {
- if bucket == 0 {
- return 0
- }
- return 1 << bucket
-}
-
-// bucketData holds data about a specific bucket for use in distTmpl.
-type bucketData struct {
- Lower, Upper int64
- N int64
- Pct, CumulativePct float64
- GraphWidth int
-}
-
-// data holds data about a Distribution for use in distTmpl.
-type data struct {
- Buckets []*bucketData
- Count, Median int64
- Mean, StandardDeviation float64
-}
-
-// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
-const maxHTMLBarWidth = 350.0
-
-// newData returns data representing h for use in distTmpl.
-func (h *histogram) newData() *data {
- // Force the allocation of buckets to simplify the rendering implementation
- h.allocateBuckets()
- // We scale the bars on the right so that the largest bar is
- // maxHTMLBarWidth pixels in width.
- maxBucket := int64(0)
- for _, n := range h.buckets {
- if n > maxBucket {
- maxBucket = n
- }
- }
- total := h.total()
- barsizeMult := maxHTMLBarWidth / float64(maxBucket)
- var pctMult float64
- if total == 0 {
- pctMult = 1.0
- } else {
- pctMult = 100.0 / float64(total)
- }
-
- buckets := make([]*bucketData, len(h.buckets))
- runningTotal := int64(0)
- for i, n := range h.buckets {
- if n == 0 {
- continue
- }
- runningTotal += n
- var upperBound int64
- if i < bucketCount-1 {
- upperBound = bucketBoundary(uint8(i + 1))
- } else {
- upperBound = math.MaxInt64
- }
- buckets[i] = &bucketData{
- Lower: bucketBoundary(uint8(i)),
- Upper: upperBound,
- N: n,
- Pct: float64(n) * pctMult,
- CumulativePct: float64(runningTotal) * pctMult,
- GraphWidth: int(float64(n) * barsizeMult),
- }
- }
- return &data{
- Buckets: buckets,
- Count: total,
- Median: h.median(),
- Mean: h.average(),
- StandardDeviation: h.standardDeviation(),
- }
-}
-
-func (h *histogram) html() template.HTML {
- buf := new(bytes.Buffer)
- if err := distTmpl().Execute(buf, h.newData()); err != nil {
- buf.Reset()
- log.Printf("net/trace: couldn't execute template: %v", err)
- }
- return template.HTML(buf.String())
-}
-
-var distTmplCache *template.Template
-var distTmplOnce sync.Once
-
-func distTmpl() *template.Template {
- distTmplOnce.Do(func() {
- // Input: data
-		distTmplCache = template.Must(template.New("distTmpl").Parse(`
-<table>
-<tr>
-  <td>Count: {{.Count}}</td>
-  <td>Mean: {{printf "%.0f" .Mean}}</td>
-  <td>StdDev: {{printf "%.0f" .StandardDeviation}}</td>
-  <td>Median: {{.Median}}</td>
-</tr>
-</table>
-<hr>
-<table>
-{{range $b := .Buckets}}
-{{if $b}}
-<tr>
-  <td>[</td>
-  <td align="right">{{.Lower}},</td>
-  <td align="right">{{.Upper}})</td>
-  <td align="right">{{.N}}</td>
-  <td align="right">{{printf "%#.3f" .Pct}}%</td>
-  <td align="right">{{printf "%#.3f" .CumulativePct}}%</td>
-  <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}}px;"></div></td>
-</tr>
-{{end}}
-{{end}}
-</table>
-`))
- })
- return distTmplCache
-}
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
deleted file mode 100644
index eae2a99f5..000000000
--- a/vendor/golang.org/x/net/trace/trace.go
+++ /dev/null
@@ -1,1130 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package trace implements tracing of requests and long-lived objects.
-It exports HTTP interfaces on /debug/requests and /debug/events.
-
-A trace.Trace provides tracing for short-lived objects, usually requests.
-A request handler might be implemented like this:
-
- func fooHandler(w http.ResponseWriter, req *http.Request) {
- tr := trace.New("mypkg.Foo", req.URL.Path)
- defer tr.Finish()
- ...
- tr.LazyPrintf("some event %q happened", str)
- ...
- if err := somethingImportant(); err != nil {
- tr.LazyPrintf("somethingImportant failed: %v", err)
- tr.SetError()
- }
- }
-
-The /debug/requests HTTP endpoint organizes the traces by family,
-errors, and duration. It also provides a histogram of request duration
-for each family.
-
-A trace.EventLog provides tracing for long-lived objects, such as RPC
-connections.
-
- // A Fetcher fetches URL paths for a single domain.
- type Fetcher struct {
- domain string
- events trace.EventLog
- }
-
- func NewFetcher(domain string) *Fetcher {
- return &Fetcher{
- domain,
- trace.NewEventLog("mypkg.Fetcher", domain),
- }
- }
-
- func (f *Fetcher) Fetch(path string) (string, error) {
- resp, err := http.Get("http://" + f.domain + "/" + path)
- if err != nil {
- f.events.Errorf("Get(%q) = %v", path, err)
- return "", err
- }
- f.events.Printf("Get(%q) = %s", path, resp.Status)
- ...
- }
-
- func (f *Fetcher) Close() error {
- f.events.Finish()
- return nil
- }
-
-The /debug/events HTTP endpoint organizes the event logs by family and
-by time since the last error. The expanded view displays recent log
-entries and the log's call stack.
-*/
-package trace // import "golang.org/x/net/trace"
-
-import (
- "bytes"
- "context"
- "fmt"
- "html/template"
- "io"
- "log"
- "net"
- "net/http"
- "net/url"
- "runtime"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/internal/timeseries"
-)
-
-// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
-// FOR DEBUGGING ONLY. This will slow down the program.
-var DebugUseAfterFinish = false
-
-// HTTP ServeMux paths.
-const (
- debugRequestsPath = "/debug/requests"
- debugEventsPath = "/debug/events"
-)
-
-// AuthRequest determines whether a specific request is permitted to load the
-// /debug/requests or /debug/events pages.
-//
-// It returns two bools; the first indicates whether the page may be viewed at all,
-// and the second indicates whether sensitive events will be shown.
-//
-// AuthRequest may be replaced by a program to customize its authorization requirements.
-//
-// The default AuthRequest function returns (true, true) if and only if the request
-// comes from localhost/127.0.0.1/[::1].
-var AuthRequest = func(req *http.Request) (any, sensitive bool) {
- // RemoteAddr is commonly in the form "IP" or "IP:port".
- // If it is in the form "IP:port", split off the port.
- host, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- host = req.RemoteAddr
- }
- switch host {
- case "localhost", "127.0.0.1", "::1":
- return true, true
- default:
- return false, false
- }
-}
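
Because AuthRequest is a package-level variable, a program can substitute its own policy before serving; a minimal sketch (the policy itself is a made-up example):

	// Allow anyone to view the pages, but only show sensitive events
	// to loopback clients.
	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
		host, _, err := net.SplitHostPort(req.RemoteAddr)
		if err != nil {
			host = req.RemoteAddr
		}
		local := host == "localhost" || host == "127.0.0.1" || host == "::1"
		return true, local
	}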
-
-func init() {
- _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}})
- if pat == debugRequestsPath {
- panic("/debug/requests is already registered. You may have two independent copies of " +
- "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " +
- "involve a vendored copy of golang.org/x/net/trace.")
- }
-
- // TODO(jbd): Serve Traces from /debug/traces in the future?
- // There is no requirement for a request to be present to have traces.
- http.HandleFunc(debugRequestsPath, Traces)
- http.HandleFunc(debugEventsPath, Events)
-}
-
-// NewContext returns a copy of the parent context
-// and associates it with a Trace.
-func NewContext(ctx context.Context, tr Trace) context.Context {
- return context.WithValue(ctx, contextKey, tr)
-}
-
-// FromContext returns the Trace bound to the context, if any.
-func FromContext(ctx context.Context) (tr Trace, ok bool) {
- tr, ok = ctx.Value(contextKey).(Trace)
- return
-}
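
A sketch of how these two functions thread a Trace through a request (handler and doWork are hypothetical):

	func handler(w http.ResponseWriter, req *http.Request) {
		tr := trace.New("mypkg.Handler", req.URL.Path)
		defer tr.Finish()
		doWork(trace.NewContext(req.Context(), tr))
	}

	func doWork(ctx context.Context) {
		if tr, ok := trace.FromContext(ctx); ok {
			tr.LazyPrintf("doing work")
		}
	}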
-
-// Traces responds with traces from the program.
-// The package initialization registers it in http.DefaultServeMux
-// at /debug/requests.
-//
-// It performs authorization by running AuthRequest.
-func Traces(w http.ResponseWriter, req *http.Request) {
- any, sensitive := AuthRequest(req)
- if !any {
- http.Error(w, "not allowed", http.StatusUnauthorized)
- return
- }
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- Render(w, req, sensitive)
-}
-
-// Events responds with a page of events collected by EventLogs.
-// The package initialization registers it in http.DefaultServeMux
-// at /debug/events.
-//
-// It performs authorization by running AuthRequest.
-func Events(w http.ResponseWriter, req *http.Request) {
- any, sensitive := AuthRequest(req)
- if !any {
- http.Error(w, "not allowed", http.StatusUnauthorized)
- return
- }
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- RenderEvents(w, req, sensitive)
-}
-
-// Render renders the HTML page typically served at /debug/requests.
-// It does not do any auth checking. The request may be nil.
-//
-// Most users will use the Traces handler.
-func Render(w io.Writer, req *http.Request, sensitive bool) {
- data := &struct {
- Families []string
- ActiveTraceCount map[string]int
- CompletedTraces map[string]*family
-
- // Set when a bucket has been selected.
- Traces traceList
- Family string
- Bucket int
- Expanded bool
- Traced bool
- Active bool
- ShowSensitive bool // whether to show sensitive events
-
- Histogram template.HTML
- HistogramWindow string // e.g. "last minute", "last hour", "all time"
-
- // If non-zero, the set of traces is a partial set,
- // and this is the total number.
- Total int
- }{
- CompletedTraces: completedTraces,
- }
-
- data.ShowSensitive = sensitive
- if req != nil {
- // Allow show_sensitive=0 to force hiding of sensitive data for testing.
- // This only goes one way; you can't use show_sensitive=1 to see things.
- if req.FormValue("show_sensitive") == "0" {
- data.ShowSensitive = false
- }
-
- if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
- data.Expanded = exp
- }
- if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
- data.Traced = exp
- }
- }
-
- completedMu.RLock()
- data.Families = make([]string, 0, len(completedTraces))
- for fam := range completedTraces {
- data.Families = append(data.Families, fam)
- }
- completedMu.RUnlock()
- sort.Strings(data.Families)
-
- // We are careful here to minimize the time spent locking activeMu,
- // since that lock is required every time an RPC starts and finishes.
- data.ActiveTraceCount = make(map[string]int, len(data.Families))
- activeMu.RLock()
- for fam, s := range activeTraces {
- data.ActiveTraceCount[fam] = s.Len()
- }
- activeMu.RUnlock()
-
- var ok bool
- data.Family, data.Bucket, ok = parseArgs(req)
- switch {
- case !ok:
- // No-op
- case data.Bucket == -1:
- data.Active = true
- n := data.ActiveTraceCount[data.Family]
- data.Traces = getActiveTraces(data.Family)
- if len(data.Traces) < n {
- data.Total = n
- }
- case data.Bucket < bucketsPerFamily:
- if b := lookupBucket(data.Family, data.Bucket); b != nil {
- data.Traces = b.Copy(data.Traced)
- }
- default:
- if f := getFamily(data.Family, false); f != nil {
- var obs timeseries.Observable
- f.LatencyMu.RLock()
- switch o := data.Bucket - bucketsPerFamily; o {
- case 0:
- obs = f.Latency.Minute()
- data.HistogramWindow = "last minute"
- case 1:
- obs = f.Latency.Hour()
- data.HistogramWindow = "last hour"
- case 2:
- obs = f.Latency.Total()
- data.HistogramWindow = "all time"
- }
- f.LatencyMu.RUnlock()
- if obs != nil {
- data.Histogram = obs.(*histogram).html()
- }
- }
- }
-
- if data.Traces != nil {
- defer data.Traces.Free()
- sort.Sort(data.Traces)
- }
-
- completedMu.RLock()
- defer completedMu.RUnlock()
- if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil {
- log.Printf("net/trace: Failed executing template: %v", err)
- }
-}
-
-func parseArgs(req *http.Request) (fam string, b int, ok bool) {
- if req == nil {
- return "", 0, false
- }
- fam, bStr := req.FormValue("fam"), req.FormValue("b")
- if fam == "" || bStr == "" {
- return "", 0, false
- }
- b, err := strconv.Atoi(bStr)
- if err != nil || b < -1 {
- return "", 0, false
- }
-
- return fam, b, true
-}
-
-func lookupBucket(fam string, b int) *traceBucket {
- f := getFamily(fam, false)
- if f == nil || b < 0 || b >= len(f.Buckets) {
- return nil
- }
- return f.Buckets[b]
-}
-
-type contextKeyT string
-
-var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
-
-// Trace represents an active request.
-type Trace interface {
- // LazyLog adds x to the event log. It will be evaluated each time the
- // /debug/requests page is rendered. Any memory referenced by x will be
- // pinned until the trace is finished and later discarded.
- LazyLog(x fmt.Stringer, sensitive bool)
-
- // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
- // /debug/requests page is rendered. Any memory referenced by a will be
- // pinned until the trace is finished and later discarded.
- LazyPrintf(format string, a ...interface{})
-
- // SetError declares that this trace resulted in an error.
- SetError()
-
- // SetRecycler sets a recycler for the trace.
- // f will be called for each event passed to LazyLog at a time when
- // it is no longer required, whether while the trace is still active
- // and the event is discarded, or when a completed trace is discarded.
- SetRecycler(f func(interface{}))
-
- // SetTraceInfo sets the trace info for the trace.
- // This is currently unused.
- SetTraceInfo(traceID, spanID uint64)
-
- // SetMaxEvents sets the maximum number of events that will be stored
- // in the trace. This has no effect if any events have already been
- // added to the trace.
- SetMaxEvents(m int)
-
- // Finish declares that this trace is complete.
- // The trace should not be used after calling this method.
- Finish()
-}
-
-type lazySprintf struct {
- format string
- a []interface{}
-}
-
-func (l *lazySprintf) String() string {
- return fmt.Sprintf(l.format, l.a...)
-}
-
-// New returns a new Trace with the specified family and title.
-func New(family, title string) Trace {
- tr := newTrace()
- tr.ref()
- tr.Family, tr.Title = family, title
- tr.Start = time.Now()
- tr.maxEvents = maxEventsPerTrace
- tr.events = tr.eventsBuf[:0]
-
- activeMu.RLock()
- s := activeTraces[tr.Family]
- activeMu.RUnlock()
- if s == nil {
- activeMu.Lock()
- s = activeTraces[tr.Family] // check again
- if s == nil {
- s = new(traceSet)
- activeTraces[tr.Family] = s
- }
- activeMu.Unlock()
- }
- s.Add(tr)
-
- // Trigger allocation of the completed trace structure for this family.
- // This will cause the family to be present in the request page during
- // the first trace of this family. We don't care about the return value,
- // nor is there any need for this to run inline, so we execute it in its
- // own goroutine, but only if the family isn't allocated yet.
- completedMu.RLock()
- if _, ok := completedTraces[tr.Family]; !ok {
- go allocFamily(tr.Family)
- }
- completedMu.RUnlock()
-
- return tr
-}
-
-func (tr *trace) Finish() {
- elapsed := time.Since(tr.Start)
- tr.mu.Lock()
- tr.Elapsed = elapsed
- tr.mu.Unlock()
-
- if DebugUseAfterFinish {
- buf := make([]byte, 4<<10) // 4 KB should be enough
- n := runtime.Stack(buf, false)
- tr.finishStack = buf[:n]
- }
-
- activeMu.RLock()
- m := activeTraces[tr.Family]
- activeMu.RUnlock()
- m.Remove(tr)
-
- f := getFamily(tr.Family, true)
- tr.mu.RLock() // protects tr fields in Cond.match calls
- for _, b := range f.Buckets {
- if b.Cond.match(tr) {
- b.Add(tr)
- }
- }
- tr.mu.RUnlock()
-
- // Add a sample of elapsed time as microseconds to the family's timeseries
- h := new(histogram)
- h.addMeasurement(elapsed.Nanoseconds() / 1e3)
- f.LatencyMu.Lock()
- f.Latency.Add(h)
- f.LatencyMu.Unlock()
-
- tr.unref() // matches ref in New
-}
-
-const (
- bucketsPerFamily = 9
- tracesPerBucket = 10
- maxActiveTraces = 20 // Maximum number of active traces to show.
- maxEventsPerTrace = 10
- numHistogramBuckets = 38
-)
-
-var (
- // The active traces.
- activeMu sync.RWMutex
- activeTraces = make(map[string]*traceSet) // family -> traces
-
- // Families of completed traces.
- completedMu sync.RWMutex
- completedTraces = make(map[string]*family) // family -> traces
-)
-
-type traceSet struct {
- mu sync.RWMutex
- m map[*trace]bool
-
- // We could avoid the entire map scan in FirstN by having a slice of all the traces
- // ordered by start time, and an index into that from the trace struct, with a periodic
- // repack of the slice after enough traces finish; we could also use a skip list or similar.
- // However, that would shift some of the expense from /debug/requests time to RPC time,
- // which is probably the wrong trade-off.
-}
-
-func (ts *traceSet) Len() int {
- ts.mu.RLock()
- defer ts.mu.RUnlock()
- return len(ts.m)
-}
-
-func (ts *traceSet) Add(tr *trace) {
- ts.mu.Lock()
- if ts.m == nil {
- ts.m = make(map[*trace]bool)
- }
- ts.m[tr] = true
- ts.mu.Unlock()
-}
-
-func (ts *traceSet) Remove(tr *trace) {
- ts.mu.Lock()
- delete(ts.m, tr)
- ts.mu.Unlock()
-}
-
-// FirstN returns the first n traces ordered by time.
-func (ts *traceSet) FirstN(n int) traceList {
- ts.mu.RLock()
- defer ts.mu.RUnlock()
-
- if n > len(ts.m) {
- n = len(ts.m)
- }
- trl := make(traceList, 0, n)
-
- // Fast path for when no selectivity is needed.
- if n == len(ts.m) {
- for tr := range ts.m {
- tr.ref()
- trl = append(trl, tr)
- }
- sort.Sort(trl)
- return trl
- }
-
- // Pick the oldest n traces.
- // This is inefficient. See the comment in the traceSet struct.
- for tr := range ts.m {
- // Put the first n traces into trl in the order they occur.
- // When we have n, sort trl, and thereafter maintain its order.
- if len(trl) < n {
- tr.ref()
- trl = append(trl, tr)
- if len(trl) == n {
- // This is guaranteed to happen exactly once during this loop.
- sort.Sort(trl)
- }
- continue
- }
- if tr.Start.After(trl[n-1].Start) {
- continue
- }
-
- // Find where to insert this one.
- tr.ref()
- i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
- trl[n-1].unref()
- copy(trl[i+1:], trl[i:])
- trl[i] = tr
- }
-
- return trl
-}
-
-func getActiveTraces(fam string) traceList {
- activeMu.RLock()
- s := activeTraces[fam]
- activeMu.RUnlock()
- if s == nil {
- return nil
- }
- return s.FirstN(maxActiveTraces)
-}
-
-func getFamily(fam string, allocNew bool) *family {
- completedMu.RLock()
- f := completedTraces[fam]
- completedMu.RUnlock()
- if f == nil && allocNew {
- f = allocFamily(fam)
- }
- return f
-}
-
-func allocFamily(fam string) *family {
- completedMu.Lock()
- defer completedMu.Unlock()
- f := completedTraces[fam]
- if f == nil {
- f = newFamily()
- completedTraces[fam] = f
- }
- return f
-}
-
-// family represents a set of trace buckets and associated latency information.
-type family struct {
- // traces may occur in multiple buckets.
- Buckets [bucketsPerFamily]*traceBucket
-
- // latency time series
- LatencyMu sync.RWMutex
- Latency *timeseries.MinuteHourSeries
-}
-
-func newFamily() *family {
- return &family{
- Buckets: [bucketsPerFamily]*traceBucket{
- {Cond: minCond(0)},
- {Cond: minCond(50 * time.Millisecond)},
- {Cond: minCond(100 * time.Millisecond)},
- {Cond: minCond(200 * time.Millisecond)},
- {Cond: minCond(500 * time.Millisecond)},
- {Cond: minCond(1 * time.Second)},
- {Cond: minCond(10 * time.Second)},
- {Cond: minCond(100 * time.Second)},
- {Cond: errorCond{}},
- },
- Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
- }
-}
-
-// traceBucket represents a size-capped bucket of historic traces,
-// along with a condition for a trace to belong to the bucket.
-type traceBucket struct {
- Cond cond
-
- // Ring buffer implementation of a fixed-size FIFO queue.
- mu sync.RWMutex
- buf [tracesPerBucket]*trace
- start int // < tracesPerBucket
- length int // <= tracesPerBucket
-}
-
-func (b *traceBucket) Add(tr *trace) {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- i := b.start + b.length
- if i >= tracesPerBucket {
- i -= tracesPerBucket
- }
- if b.length == tracesPerBucket {
- // "Remove" an element from the bucket.
- b.buf[i].unref()
- b.start++
- if b.start == tracesPerBucket {
- b.start = 0
- }
- }
- b.buf[i] = tr
- if b.length < tracesPerBucket {
- b.length++
- }
- tr.ref()
-}
-
-// Copy returns a copy of the traces in the bucket.
-// If tracedOnly is true, only the traces with trace information will be returned.
-// The logs will be ref'd before returning; the caller should call
-// the Free method when it is done with them.
-// TODO(dsymonds): keep track of traced requests in separate buckets.
-func (b *traceBucket) Copy(tracedOnly bool) traceList {
- b.mu.RLock()
- defer b.mu.RUnlock()
-
- trl := make(traceList, 0, b.length)
- for i, x := 0, b.start; i < b.length; i++ {
- tr := b.buf[x]
- if !tracedOnly || tr.spanID != 0 {
- tr.ref()
- trl = append(trl, tr)
- }
- x++
-		if x == len(b.buf) {
- x = 0
- }
- }
- return trl
-}
-
-func (b *traceBucket) Empty() bool {
- b.mu.RLock()
- defer b.mu.RUnlock()
- return b.length == 0
-}
-
-// cond represents a condition on a trace.
-type cond interface {
- match(t *trace) bool
- String() string
-}
-
-type minCond time.Duration
-
-func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
-func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
-
-type errorCond struct{}
-
-func (e errorCond) match(t *trace) bool { return t.IsError }
-func (e errorCond) String() string { return "errors" }
-
-type traceList []*trace
-
-// Free calls unref on each element of the list.
-func (trl traceList) Free() {
- for _, t := range trl {
- t.unref()
- }
-}
-
-// traceList may be sorted in reverse chronological order.
-func (trl traceList) Len() int { return len(trl) }
-func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
-func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
-
-// An event is a timestamped log entry in a trace.
-type event struct {
- When time.Time
- Elapsed time.Duration // since previous event in trace
- NewDay bool // whether this event is on a different day to the previous event
- Recyclable bool // whether this event was passed via LazyLog
- Sensitive bool // whether this event contains sensitive information
- What interface{} // string or fmt.Stringer
-}
-
-// WhenString returns a string representation of the elapsed time of the event.
-// It will include the date if midnight was crossed.
-func (e event) WhenString() string {
- if e.NewDay {
- return e.When.Format("2006/01/02 15:04:05.000000")
- }
- return e.When.Format("15:04:05.000000")
-}
-
-// discarded represents a number of discarded events.
-// It is stored as *discarded to make it easier to update in-place.
-type discarded int
-
-func (d *discarded) String() string {
- return fmt.Sprintf("(%d events discarded)", int(*d))
-}
-
-// trace represents an active or complete request,
-// either sent or received by this program.
-type trace struct {
- // Family is the top-level grouping of traces to which this belongs.
- Family string
-
- // Title is the title of this trace.
- Title string
-
-	// Start time of this trace.
- Start time.Time
-
- mu sync.RWMutex
- events []event // Append-only sequence of events (modulo discards).
- maxEvents int
- recycler func(interface{})
- IsError bool // Whether this trace resulted in an error.
- Elapsed time.Duration // Elapsed time for this trace, zero while active.
- traceID uint64 // Trace information if non-zero.
- spanID uint64
-
- refs int32 // how many buckets this is in
- disc discarded // scratch space to avoid allocation
-
- finishStack []byte // where finish was called, if DebugUseAfterFinish is set
-
- eventsBuf [4]event // preallocated buffer in case we only log a few events
-}
-
-func (tr *trace) reset() {
- // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
- tr.Family = ""
- tr.Title = ""
- tr.Start = time.Time{}
-
- tr.mu.Lock()
- tr.Elapsed = 0
- tr.traceID = 0
- tr.spanID = 0
- tr.IsError = false
- tr.maxEvents = 0
- tr.events = nil
- tr.recycler = nil
- tr.mu.Unlock()
-
- tr.refs = 0
- tr.disc = 0
- tr.finishStack = nil
- for i := range tr.eventsBuf {
- tr.eventsBuf[i] = event{}
- }
-}
-
-// delta returns the elapsed time since the last event or the trace start,
-// and whether it spans midnight.
-// L >= tr.mu
-func (tr *trace) delta(t time.Time) (time.Duration, bool) {
- if len(tr.events) == 0 {
- return t.Sub(tr.Start), false
- }
- prev := tr.events[len(tr.events)-1].When
- return t.Sub(prev), prev.Day() != t.Day()
-}
-
-func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
- if DebugUseAfterFinish && tr.finishStack != nil {
- buf := make([]byte, 4<<10) // 4 KB should be enough
- n := runtime.Stack(buf, false)
- log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
- }
-
- /*
- NOTE TO DEBUGGERS
-
- If you are here because your program panicked in this code,
- it is almost definitely the fault of code using this package,
- and very unlikely to be the fault of this code.
-
- The most likely scenario is that some code elsewhere is using
- a trace.Trace after its Finish method is called.
- You can temporarily set the DebugUseAfterFinish var
- to help discover where that is; do not leave that var set,
- since it makes this package much less efficient.
- */
-
- e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
- tr.mu.Lock()
- e.Elapsed, e.NewDay = tr.delta(e.When)
- if len(tr.events) < tr.maxEvents {
- tr.events = append(tr.events, e)
- } else {
- // Discard the middle events.
- di := int((tr.maxEvents - 1) / 2)
- if d, ok := tr.events[di].What.(*discarded); ok {
- (*d)++
- } else {
-			// disc starts at two to account for the event it is replacing,
- // plus the next one that we are about to drop.
- tr.disc = 2
- if tr.recycler != nil && tr.events[di].Recyclable {
- go tr.recycler(tr.events[di].What)
- }
- tr.events[di].What = &tr.disc
- }
- // The timestamp of the discarded meta-event should be
- // the time of the last event it is representing.
- tr.events[di].When = tr.events[di+1].When
-
- if tr.recycler != nil && tr.events[di+1].Recyclable {
- go tr.recycler(tr.events[di+1].What)
- }
- copy(tr.events[di+1:], tr.events[di+2:])
- tr.events[tr.maxEvents-1] = e
- }
- tr.mu.Unlock()
-}
-
-func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
- tr.addEvent(x, true, sensitive)
-}
-
-func (tr *trace) LazyPrintf(format string, a ...interface{}) {
- tr.addEvent(&lazySprintf{format, a}, false, false)
-}
-
-func (tr *trace) SetError() {
- tr.mu.Lock()
- tr.IsError = true
- tr.mu.Unlock()
-}
-
-func (tr *trace) SetRecycler(f func(interface{})) {
- tr.mu.Lock()
- tr.recycler = f
- tr.mu.Unlock()
-}
-
-func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
- tr.mu.Lock()
- tr.traceID, tr.spanID = traceID, spanID
- tr.mu.Unlock()
-}
-
-func (tr *trace) SetMaxEvents(m int) {
- tr.mu.Lock()
- // Always keep at least three events: first, discarded count, last.
- if len(tr.events) == 0 && m > 3 {
- tr.maxEvents = m
- }
- tr.mu.Unlock()
-}
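
Since the guard above makes SetMaxEvents a no-op once any event exists (or when m <= 3), callers must raise the limit immediately after New; a sketch:

	tr := trace.New("mypkg.Import", "nightly batch")
	tr.SetMaxEvents(100) // must be >3 and precede the first LazyLog/LazyPrintf
	defer tr.Finish()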
-
-func (tr *trace) ref() {
- atomic.AddInt32(&tr.refs, 1)
-}
-
-func (tr *trace) unref() {
- if atomic.AddInt32(&tr.refs, -1) == 0 {
- tr.mu.RLock()
- if tr.recycler != nil {
- // freeTrace clears tr, so we hold tr.recycler and tr.events here.
- go func(f func(interface{}), es []event) {
- for _, e := range es {
- if e.Recyclable {
- f(e.What)
- }
- }
- }(tr.recycler, tr.events)
- }
- tr.mu.RUnlock()
-
- freeTrace(tr)
- }
-}
-
-func (tr *trace) When() string {
- return tr.Start.Format("2006/01/02 15:04:05.000000")
-}
-
-func (tr *trace) ElapsedTime() string {
- tr.mu.RLock()
- t := tr.Elapsed
- tr.mu.RUnlock()
-
- if t == 0 {
- // Active trace.
- t = time.Since(tr.Start)
- }
- return fmt.Sprintf("%.6f", t.Seconds())
-}
-
-func (tr *trace) Events() []event {
- tr.mu.RLock()
- defer tr.mu.RUnlock()
- return tr.events
-}
-
-var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
-
-// newTrace returns a trace ready to use.
-func newTrace() *trace {
- select {
- case tr := <-traceFreeList:
- return tr
- default:
- return new(trace)
- }
-}
-
-// freeTrace adds tr to traceFreeList if there's room.
-// This is non-blocking.
-func freeTrace(tr *trace) {
- if DebugUseAfterFinish {
- return // never reuse
- }
- tr.reset()
- select {
- case traceFreeList <- tr:
- default:
- }
-}
-
-func elapsed(d time.Duration) string {
- b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
-
- // For subsecond durations, blank all zeros before decimal point,
- // and all zeros between the decimal point and the first non-zero digit.
- if d < time.Second {
- dot := bytes.IndexByte(b, '.')
- for i := 0; i < dot; i++ {
- b[i] = ' '
- }
- for i := dot + 1; i < len(b); i++ {
- if b[i] == '0' {
- b[i] = ' '
- } else {
- break
- }
- }
- }
-
- return string(b)
-}
-
-var pageTmplCache *template.Template
-var pageTmplOnce sync.Once
-
-func pageTmpl() *template.Template {
- pageTmplOnce.Do(func() {
- pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
- "elapsed": elapsed,
- "add": func(a, b int) int { return a + b },
- }).Parse(pageHTML))
- })
- return pageTmplCache
-}
-
-const pageHTML = `
-{{template "Prolog" .}}
-{{template "StatusTable" .}}
-{{template "Epilog" .}}
-
-{{define "Prolog"}}
-<html>
-<head>
-<title>/debug/requests</title>
-</head>
-<body>
-<h1>/debug/requests</h1>
-{{end}} {{/* end of Prolog */}}
-
-{{define "StatusTable"}}
-
-{{end}} {{/* end of StatusTable */}}
-
-{{define "Epilog"}}
-{{if $.Traces}}
-
-Family: {{$.Family}}
-
-{{if or $.Expanded $.Traced}}
- [Normal/Summary]
-{{else}}
- [Normal/Summary]
-{{end}}
-
-{{if or (not $.Expanded) $.Traced}}
- [Normal/Expanded]
-{{else}}
- [Normal/Expanded]
-{{end}}
-
-{{if not $.Active}}
- {{if or $.Expanded (not $.Traced)}}
- [Traced/Summary]
- {{else}}
- [Traced/Summary]
- {{end}}
- {{if or (not $.Expanded) (not $.Traced)}}
- [Traced/Expanded]
- {{else}}
- [Traced/Expanded]
- {{end}}
-{{end}}
-
-{{if $.Total}}
-Showing {{len $.Traces}} of {{$.Total}} traces.
-{{end}}
-
-<table>
-<caption>{{if $.Active}}Active{{else}}Completed{{end}} Requests</caption>
-<tr><th>When</th><th>Elapsed (s)</th></tr>
-{{range $tr := $.Traces}}
-<tr>
-  <td>{{$tr.When}}</td>
-  <td>{{$tr.ElapsedTime}}</td>
-  <td>{{$tr.Title}}</td>
-  {{/* TODO: include traceID/spanID */}}
-</tr>
-{{if $.Expanded}}
-{{range $tr.Events}}
-<tr>
-  <td>{{.WhenString}}</td>
-  <td>{{elapsed .Elapsed}}</td>
-  <td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}</td>
-</tr>
-{{end}}
-{{end}}
-{{end}}
-</table>
-{{end}} {{/* if $.Traces */}}
-
-{{if $.Histogram}}
-<h4>Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}</h4>
-{{$.Histogram}}
-{{end}} {{/* if $.Histogram */}}
-
-</body>
-</html>
-{{end}} {{/* end of Epilog */}}
-`
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
index 03265e888..8c7c475f2 100644
--- a/vendor/golang.org/x/oauth2/internal/doc.go
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package internal contains support packages for oauth2 package.
+// Package internal contains support packages for [golang.org/x/oauth2].
package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
index 14989beaf..71ea6ad1f 100644
--- a/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -13,7 +13,7 @@ import (
)
// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
+// to an [*rsa.PrivateKey]. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index e83ddeef0..8389f2462 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"math"
"mime"
"net/http"
@@ -26,9 +25,9 @@ import (
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
-// This type is a mirror of oauth2.Token and exists to break
+// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break
// an otherwise-circular dependency. Other internal packages
-// should convert this Token into an oauth2.Token before use.
+// should convert this Token into a [golang.org/x/oauth2.Token] before use.
type Token struct {
// AccessToken is the token that authorizes and authenticates
// the requests.
@@ -50,9 +49,16 @@ type Token struct {
// mechanisms for that TokenSource will not be used.
Expiry time.Time
+ // ExpiresIn is the OAuth2 wire format "expires_in" field,
+ // which specifies how many seconds later the token expires,
+ // relative to an unknown time base approximately around "now".
+ // It is the application's responsibility to populate
+ // `Expiry` from `ExpiresIn` when required.
+ ExpiresIn int64 `json:"expires_in,omitempty"`
+
// Raw optionally contains extra metadata from the server
// when updating a token.
- Raw interface{}
+ Raw any
}
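
A sketch of the derivation the comment describes, assuming the caller treats its own clock as close enough to the server's time base:

	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	}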
// tokenJSON is the struct representing the HTTP response from OAuth2
@@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error {
return nil
}
-// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
-//
-// Deprecated: this function no longer does anything. Caller code that
-// wants to avoid potential extra HTTP requests made during
-// auto-probing of the provider's auth style should set
-// Endpoint.AuthStyle.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
-
// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
type AuthStyle int
@@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
return c
}
+type authStyleCacheKey struct {
+ url string
+ clientID string
+}
+
// AuthStyleCache is the set of tokenURLs we've successfully used via
// RetrieveToken and which style auth we ended up using.
// It's called a cache, but it doesn't (yet?) shrink. It's expected that
@@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
// small.
type AuthStyleCache struct {
mu sync.Mutex
- m map[string]AuthStyle // keyed by tokenURL
+ m map[authStyleCacheKey]AuthStyle
}
// lookupAuthStyle reports which auth style we last used with tokenURL
// when calling RetrieveToken and whether we have ever done so.
-func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
+func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) {
c.mu.Lock()
defer c.mu.Unlock()
- style, ok = c.m[tokenURL]
+ style, ok = c.m[authStyleCacheKey{tokenURL, clientID}]
return
}
// setAuthStyle adds an entry to authStyleCache, documented above.
-func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) {
+func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) {
c.mu.Lock()
defer c.mu.Unlock()
if c.m == nil {
- c.m = make(map[string]AuthStyle)
+ c.m = make(map[authStyleCacheKey]AuthStyle)
}
- c.m[tokenURL] = v
+ c.m[authStyleCacheKey{tokenURL, clientID}] = v
}
// newTokenRequest returns a new *http.Request to retrieve a new token
@@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values {
}
func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) {
- needsAuthStyleProbe := authStyle == 0
+ needsAuthStyleProbe := authStyle == AuthStyleUnknown
if needsAuthStyleProbe {
- if style, ok := styleCache.lookupAuthStyle(tokenURL); ok {
+ if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok {
authStyle = style
needsAuthStyleProbe = false
} else {
@@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
token, err = doTokenRoundTrip(ctx, req)
}
if needsAuthStyleProbe && err == nil {
- styleCache.setAuthStyle(tokenURL, authStyle)
+ styleCache.setAuthStyle(tokenURL, clientID, authStyle)
}
// Don't overwrite `RefreshToken` with an empty value
// if this was a token refreshing request.
@@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
if err != nil {
return nil, err
}
- body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20))
r.Body.Close()
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
@@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
TokenType: tj.TokenType,
RefreshToken: tj.RefreshToken,
Expiry: tj.expiry(),
- Raw: make(map[string]interface{}),
+ ExpiresIn: int64(tj.ExpiresIn),
+ Raw: make(map[string]any),
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
index b9db01ddf..afc0aeb27 100644
--- a/vendor/golang.org/x/oauth2/internal/transport.go
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -9,8 +9,8 @@ import (
"net/http"
)
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
+// HTTPClient is the context key to use with [context.WithValue]
+// to associate an [*http.Client] value with a context.
var HTTPClient ContextKey
// ContextKey is just an empty struct. It exists so HTTPClient can be
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index 74f052aa9..de34feb84 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -22,9 +22,9 @@ import (
)
// NoContext is the default context you should supply if not using
-// your own context.Context (see https://golang.org/x/net/context).
+// your own [context.Context].
//
-// Deprecated: Use context.Background() or context.TODO() instead.
+// Deprecated: Use [context.Background] or [context.TODO] instead.
var NoContext = context.TODO()
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
@@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
-// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
-// package (https://golang.org/x/oauth2/clientcredentials).
+// For the client credentials 2-legged OAuth2 flow, see the
+// [golang.org/x/oauth2/clientcredentials] package.
type Config struct {
// ClientID is the application's ID.
ClientID string
@@ -46,7 +46,7 @@ type Config struct {
// ClientSecret is the application's secret.
ClientSecret string
- // Endpoint contains the resource server's token endpoint
+ // Endpoint contains the authorization server's token endpoint
// URLs. These are constants specific to each server and are
// often available via site-specific packages, such as
// google.Endpoint or github.Endpoint.
@@ -135,7 +135,7 @@ type setParam struct{ k, v string }
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
-// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters
// to a provider's authorization endpoint.
func SetAuthURLParam(key, value string) AuthCodeOption {
return setParam{key, value}
@@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// request and callback. The authorization server includes this value when
// redirecting the user agent back to the client.
//
-// Opts may include AccessTypeOnline or AccessTypeOffline, as well
-// as ApprovalForce.
+// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well
+// as [ApprovalForce].
//
// To protect against CSRF attacks, opts should include a PKCE challenge
// (S256ChallengeOption). Not all servers support PKCE. An alternative is to
@@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
-// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
v := url.Values{
"grant_type": {"password"},
@@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
-// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
//
-// The code will be in the *http.Request.FormValue("code"). Before
-// calling Exchange, be sure to validate FormValue("state") if you are
+// The code will be in the [http.Request.FormValue]("code"). Before
+// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are
// using it to protect against CSRF attacks.
//
// If using PKCE to protect against CSRF attacks, opts should include a
@@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
return NewClient(ctx, c.TokenSource(ctx, t))
}
-// TokenSource returns a TokenSource that returns t until t expires,
+// TokenSource returns a [TokenSource] that returns t until t expires,
// automatically refreshing it as necessary using the provided context.
//
-// Most users will use Config.Client instead.
+// Most users will use [Config.Client] instead.
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
tkr := &tokenRefresher{
ctx: ctx,
@@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
}
}
-// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// tokenRefresher is a TokenSource that makes "grant_type=refresh_token"
// HTTP requests to renew a token using a RefreshToken.
type tokenRefresher struct {
ctx context.Context // used to get HTTP requests
@@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) {
if tf.refreshToken != tk.RefreshToken {
tf.refreshToken = tk.RefreshToken
}
- return tk, err
+ return tk, nil
}
// reuseTokenSource is a TokenSource that holds a single token in memory
@@ -305,8 +305,7 @@ type reuseTokenSource struct {
}
// Token returns the current token if it's still valid, else will
-// refresh the current token (using r.Context for HTTP client
-// information) and return the new one.
+// refresh the current token and return the new one.
func (s *reuseTokenSource) Token() (*Token, error) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
return t, nil
}
-// StaticTokenSource returns a TokenSource that always returns the same token.
+// StaticTokenSource returns a [TokenSource] that always returns the same token.
// Because the provided token t is never refreshed, StaticTokenSource is only
// useful for tokens that never expire.
func StaticTokenSource(t *Token) TokenSource {
@@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) {
return s.t, nil
}
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
+// HTTPClient is the context key to use with [context.WithValue]
+// to associate a [*http.Client] value with a context.
var HTTPClient internal.ContextKey
-// NewClient creates an *http.Client from a Context and TokenSource.
+// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource].
// The returned client is not valid beyond the lifetime of the context.
//
-// Note that if a custom *http.Client is provided via the Context it
+// Note that if a custom [*http.Client] is provided via the [context.Context] it
// is used only for token acquisition and is not used to configure the
-// *http.Client returned from NewClient.
+// [*http.Client] returned from NewClient.
//
// As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2
@@ -356,15 +355,19 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil {
return internal.ContextClient(ctx)
}
+ cc := internal.ContextClient(ctx)
return &http.Client{
Transport: &Transport{
- Base: internal.ContextClient(ctx).Transport,
+ Base: cc.Transport,
Source: ReuseTokenSource(nil, src),
},
+ CheckRedirect: cc.CheckRedirect,
+ Jar: cc.Jar,
+ Timeout: cc.Timeout,
}
}
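
With this change, the redirect policy, cookie jar, and timeout of a client supplied via the HTTPClient context key carry over to the returned client; a sketch (src is some TokenSource):

	hc := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)
	client := oauth2.NewClient(ctx, src) // client inherits the 10s timeout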
-// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// ReuseTokenSource returns a [TokenSource] which repeatedly returns the
// same token as long as it's valid, starting with t.
// When its cached token is invalid, a new token is obtained from src.
//
@@ -372,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
// (such as a file on disk) between runs of a program, rather than
// obtaining new tokens unnecessarily.
//
-// The initial token t may be nil, in which case the TokenSource is
+// The initial token t may be nil, in which case the [TokenSource] is
// wrapped in a caching version if it isn't one already. This also
// means it's always safe to wrap ReuseTokenSource around any other
-// TokenSource without adverse effects.
+// [TokenSource] without adverse effects.
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
// Don't wrap a reuseTokenSource in itself. That would work,
// but cause an unnecessary number of mutex operations.
@@ -393,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
}
}
-// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
-// TokenSource returned by ReuseTokenSource, except the expiry buffer is
+// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the
+// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is
// configurable. The expiration time of a token is calculated as
// t.Expiry.Add(-earlyExpiry).
func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
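
A sketch of the persistence pattern the comment describes (loadToken is a hypothetical helper; src is any TokenSource that is expensive to call):

	saved, _ := loadToken("token.json")
	ts := oauth2.ReuseTokenSource(saved, src)
	tok, err := ts.Token() // returns saved until it expires, then consults src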
diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go
index 50593b6df..cea8374d5 100644
--- a/vendor/golang.org/x/oauth2/pkce.go
+++ b/vendor/golang.org/x/oauth2/pkce.go
@@ -1,6 +1,7 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+
package oauth2
import (
@@ -20,9 +21,9 @@ const (
// This follows recommendations in RFC 7636.
//
// A fresh verifier should be generated for each authorization.
-// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
-// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange
-// (or Config.DeviceAccessToken).
+// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
+// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken]
+// with [VerifierOption].
func GenerateVerifier() string {
// "RECOMMENDED that the output of a suitable random number generator be
// used to create a 32-octet sequence. The octet sequence is then
@@ -36,22 +37,22 @@ func GenerateVerifier() string {
return base64.RawURLEncoding.EncodeToString(data)
}
-// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
-// passed to Config.Exchange or Config.DeviceAccessToken only.
+// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be
+// passed to [Config.Exchange] or [Config.DeviceAccessToken].
func VerifierOption(verifier string) AuthCodeOption {
return setParam{k: codeVerifierKey, v: verifier}
}
// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256.
//
-// Prefer to use S256ChallengeOption where possible.
+// Prefer to use [S256ChallengeOption] where possible.
func S256ChallengeFromVerifier(verifier string) string {
sha := sha256.Sum256([]byte(verifier))
return base64.RawURLEncoding.EncodeToString(sha[:])
}
// S256ChallengeOption derives a PKCE code challenge derived from verifier with
-// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess
+// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
// only.
func S256ChallengeOption(verifier string) AuthCodeOption {
return challengeOption{
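
A sketch of the full PKCE handshake using these helpers (conf is a hypothetical oauth2.Config; code is the value received in the callback):

	verifier := oauth2.GenerateVerifier()
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	// Redirect the user to url; then, in the callback handler:
	tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))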
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 109997d77..239ec3296 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -44,7 +44,7 @@ type Token struct {
// Expiry is the optional expiration time of the access token.
//
- // If zero, TokenSource implementations will reuse the same
+ // If zero, [TokenSource] implementations will reuse the same
// token forever and RefreshToken or equivalent
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
@@ -58,7 +58,7 @@ type Token struct {
// raw optionally contains extra metadata from the server
// when updating a token.
- raw interface{}
+ raw any
// expiryDelta is used to calculate when a token is considered
// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
@@ -86,16 +86,16 @@ func (t *Token) Type() string {
// SetAuthHeader sets the Authorization header to r using the access
// token in t.
//
-// This method is unnecessary when using Transport or an HTTP Client
+// This method is unnecessary when using [Transport] or an HTTP Client
// returned by this package.
func (t *Token) SetAuthHeader(r *http.Request) {
r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
}
-// WithExtra returns a new Token that's a clone of t, but using the
+// WithExtra returns a new [Token] that's a clone of t, but using the
// provided raw extra map. This is only intended for use by packages
// implementing derivative OAuth2 flows.
-func (t *Token) WithExtra(extra interface{}) *Token {
+func (t *Token) WithExtra(extra any) *Token {
t2 := new(Token)
*t2 = *t
t2.raw = extra
@@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token {
// Extra returns an extra field.
// Extra fields are key-value pairs returned by the server as a
// part of the token retrieval response.
-func (t *Token) Extra(key string) interface{} {
- if raw, ok := t.raw.(map[string]interface{}); ok {
+func (t *Token) Extra(key string) any {
+ if raw, ok := t.raw.(map[string]any); ok {
return raw[key]
}
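
A sketch of reading a provider-specific field via Extra (the "id_token" field is typical of OpenID Connect providers; conf and code are hypothetical):

	tok, _ := conf.Exchange(ctx, code)
	if idToken, ok := tok.Extra("id_token").(string); ok {
		_ = idToken // verify and decode the ID token here
	}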
@@ -163,13 +163,14 @@ func tokenFromInternal(t *internal.Token) *Token {
TokenType: t.TokenType,
RefreshToken: t.RefreshToken,
Expiry: t.Expiry,
+ ExpiresIn: t.ExpiresIn,
raw: t.Raw,
}
}
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
-// with an error..
+// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get())
if err != nil {
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
index 90657915f..8bbebbac9 100644
--- a/vendor/golang.org/x/oauth2/transport.go
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -11,12 +11,12 @@ import (
"sync"
)
-// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
-// wrapping a base RoundTripper and adding an Authorization header
-// with a token from the supplied Sources.
+// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests,
+// wrapping a base [http.RoundTripper] and adding an Authorization header
+// with a token from the supplied [TokenSource].
//
// Transport is a low-level mechanism. Most code will use the
-// higher-level Config.Client method instead.
+// higher-level [Config.Client] method instead.
type Transport struct {
// Source supplies the token to add to outgoing requests'
// Authorization headers.
@@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
return nil, err
}
- req2 := cloneRequest(req) // per RoundTripper contract
+ req2 := req.Clone(req.Context())
token.SetAuthHeader(req2)
// req.Body is assumed to be closed by the base RoundTripper.
@@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper {
}
return http.DefaultTransport
}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
- return r2
-}
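
A sketch of using Transport directly with a static token (most code should prefer Config.Client; token is a string holding the access token):

	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}),
		},
	}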
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index f8c3c0926..cb6bb9ad3 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -12,6 +12,8 @@ package errgroup
import (
"context"
"fmt"
+ "runtime"
+ "runtime/debug"
"sync"
)
@@ -31,6 +33,10 @@ type Group struct {
errOnce sync.Once
err error
+
+ mu sync.Mutex
+ panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked.
+ abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit).
}
func (g *Group) done() {
@@ -50,33 +56,78 @@ func WithContext(ctx context.Context) (*Group, context.Context) {
return &Group{cancel: cancel}, ctx
}
-// Wait blocks until all function calls from the Go method have returned, then
-// returns the first non-nil error (if any) from them.
+// Wait blocks until all function calls from the Go method have returned
+// normally, then returns the first non-nil error (if any) from them.
+//
+// If any of the calls panics, Wait panics with a [PanicError] or a [PanicValue];
+// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit.
func (g *Group) Wait() error {
g.wg.Wait()
if g.cancel != nil {
g.cancel(g.err)
}
+ if g.panicValue != nil {
+ panic(g.panicValue)
+ }
+ if g.abnormal {
+ runtime.Goexit()
+ }
return g.err
}
// Go calls the given function in a new goroutine.
+//
// The first call to Go must happen before a Wait.
// It blocks until the new goroutine can be added without the number of
-// active goroutines in the group exceeding the configured limit.
+// goroutines in the group exceeding the configured limit.
//
-// The first call to return a non-nil error cancels the group's context, if the
-// group was created by calling WithContext. The error will be returned by Wait.
+// The first goroutine in the group that returns a non-nil error, panics, or
+// invokes [runtime.Goexit] will cancel the associated Context, if any.
func (g *Group) Go(f func() error) {
if g.sem != nil {
g.sem <- token{}
}
+ g.add(f)
+}
+
+func (g *Group) add(f func() error) {
g.wg.Add(1)
go func() {
defer g.done()
+ normalReturn := false
+ defer func() {
+ if normalReturn {
+ return
+ }
+ v := recover()
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ if !g.abnormal {
+ if g.cancel != nil {
+ g.cancel(g.err)
+ }
+ g.abnormal = true
+ }
+ if v != nil && g.panicValue == nil {
+ switch v := v.(type) {
+ case error:
+ g.panicValue = PanicError{
+ Recovered: v,
+ Stack: debug.Stack(),
+ }
+ default:
+ g.panicValue = PanicValue{
+ Recovered: v,
+ Stack: debug.Stack(),
+ }
+ }
+ }
+ }()
- if err := f(); err != nil {
+ err := f()
+ normalReturn = true
+ if err != nil {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
@@ -101,19 +152,7 @@ func (g *Group) TryGo(f func() error) bool {
}
}
- g.wg.Add(1)
- go func() {
- defer g.done()
-
- if err := f(); err != nil {
- g.errOnce.Do(func() {
- g.err = err
- if g.cancel != nil {
- g.cancel(g.err)
- }
- })
- }
- }()
+ g.add(f)
return true
}
@@ -135,3 +174,34 @@ func (g *Group) SetLimit(n int) {
}
g.sem = make(chan token, n)
}
+
+// PanicError wraps an error recovered from an unhandled panic
+// when calling a function passed to Go or TryGo.
+type PanicError struct {
+ Recovered error
+ Stack []byte // result of call to [debug.Stack]
+}
+
+func (p PanicError) Error() string {
+ if len(p.Stack) > 0 {
+ return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack)
+ }
+ return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
+}
+
+func (p PanicError) Unwrap() error { return p.Recovered }
+
+// PanicValue wraps a value that does not implement the error interface,
+// recovered from an unhandled panic when calling a function passed to Go or
+// TryGo.
+type PanicValue struct {
+ Recovered any
+ Stack []byte // result of call to [debug.Stack]
+}
+
+func (p PanicValue) String() string {
+ if len(p.Stack) > 0 {
+ return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack)
+ }
+ return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
+}
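
A sketch of how a caller might convert a propagated panic back into an error (waitSafely is a hypothetical wrapper):

	func waitSafely(g *errgroup.Group) (err error) {
		defer func() {
			if v := recover(); v != nil {
				if pe, ok := v.(errgroup.PanicError); ok {
					err = fmt.Errorf("worker panicked: %w", pe.Recovered)
					return
				}
				panic(v) // a PanicValue or a foreign panic: re-raise
			}
		}()
		return g.Wait()
	}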
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
deleted file mode 100644
index b618162aa..000000000
--- a/vendor/golang.org/x/sync/semaphore/semaphore.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package semaphore provides a weighted semaphore implementation.
-package semaphore // import "golang.org/x/sync/semaphore"
-
-import (
- "container/list"
- "context"
- "sync"
-)
-
-type waiter struct {
- n int64
- ready chan<- struct{} // Closed when semaphore acquired.
-}
-
-// NewWeighted creates a new weighted semaphore with the given
-// maximum combined weight for concurrent access.
-func NewWeighted(n int64) *Weighted {
- w := &Weighted{size: n}
- return w
-}
-
-// Weighted provides a way to bound concurrent access to a resource.
-// The callers can request access with a given weight.
-type Weighted struct {
- size int64
- cur int64
- mu sync.Mutex
- waiters list.List
-}
-
-// Acquire acquires the semaphore with a weight of n, blocking until resources
-// are available or ctx is done. On success, returns nil. On failure, returns
-// ctx.Err() and leaves the semaphore unchanged.
-func (s *Weighted) Acquire(ctx context.Context, n int64) error {
- done := ctx.Done()
-
- s.mu.Lock()
- select {
- case <-done:
- // ctx becoming done has "happened before" acquiring the semaphore,
- // whether it became done before the call began or while we were
- // waiting for the mutex. We prefer to fail even if we could acquire
- // the mutex without blocking.
- s.mu.Unlock()
- return ctx.Err()
- default:
- }
- if s.size-s.cur >= n && s.waiters.Len() == 0 {
- // Since we hold s.mu and haven't synchronized since checking done, if
- // ctx becomes done before we return here, it becoming done must have
- // "happened concurrently" with this call - it cannot "happen before"
- // we return in this branch. So, we're ok to always acquire here.
- s.cur += n
- s.mu.Unlock()
- return nil
- }
-
- if n > s.size {
- // Don't make other Acquire calls block on one that's doomed to fail.
- s.mu.Unlock()
- <-done
- return ctx.Err()
- }
-
- ready := make(chan struct{})
- w := waiter{n: n, ready: ready}
- elem := s.waiters.PushBack(w)
- s.mu.Unlock()
-
- select {
- case <-done:
- s.mu.Lock()
- select {
- case <-ready:
- // Acquired the semaphore after we were canceled.
- // Pretend we didn't and put the tokens back.
- s.cur -= n
- s.notifyWaiters()
- default:
- isFront := s.waiters.Front() == elem
- s.waiters.Remove(elem)
- // If we're at the front and there're extra tokens left, notify other waiters.
- if isFront && s.size > s.cur {
- s.notifyWaiters()
- }
- }
- s.mu.Unlock()
- return ctx.Err()
-
- case <-ready:
- // Acquired the semaphore. Check that ctx isn't already done.
- // We check the done channel instead of calling ctx.Err because we
- // already have the channel, and ctx.Err is O(n) with the nesting
- // depth of ctx.
- select {
- case <-done:
- s.Release(n)
- return ctx.Err()
- default:
- }
- return nil
- }
-}
-
-// TryAcquire acquires the semaphore with a weight of n without blocking.
-// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
-func (s *Weighted) TryAcquire(n int64) bool {
- s.mu.Lock()
- success := s.size-s.cur >= n && s.waiters.Len() == 0
- if success {
- s.cur += n
- }
- s.mu.Unlock()
- return success
-}
-
-// Release releases the semaphore with a weight of n.
-func (s *Weighted) Release(n int64) {
- s.mu.Lock()
- s.cur -= n
- if s.cur < 0 {
- s.mu.Unlock()
- panic("semaphore: released more than held")
- }
- s.notifyWaiters()
- s.mu.Unlock()
-}
-
-func (s *Weighted) notifyWaiters() {
- for {
- next := s.waiters.Front()
- if next == nil {
- break // No more waiters blocked.
- }
-
- w := next.Value.(waiter)
- if s.size-s.cur < w.n {
- // Not enough tokens for the next waiter. We could keep going (to try to
- // find a waiter with a smaller request), but under load that could cause
- // starvation for large requests; instead, we leave all remaining waiters
- // blocked.
- //
- // Consider a semaphore used as a read-write lock, with N tokens, N
- // readers, and one writer. Each reader can Acquire(1) to obtain a read
- // lock. The writer can Acquire(N) to obtain a write lock, excluding all
- // of the readers. If we allow the readers to jump ahead in the queue,
- // the writer will starve — there is always one token available for every
- // reader.
- break
- }
-
- s.cur += w.n
- s.waiters.Remove(next)
- close(w.ready)
- }
-}
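
For reference, the package removed above remains available upstream as golang.org/x/sync/semaphore; a minimal sketch of its intended use, bounding concurrent work by weight:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 units held concurrently

	for i := 0; i < 5; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			return
		}
		go func(id int) {
			defer sem.Release(1)
			fmt.Println("worker", id, "running")
		}(i)
	}

	// Acquiring the full weight waits for every worker to release.
	if err := sem.Acquire(ctx, 3); err == nil {
		fmt.Println("all workers done")
	}
}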
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index b6e1ab76f..a8b0364c7 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -1303,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
return nil, err
}
if absoluteSDSize > 0 {
- absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0]))
+ absoluteSD = new(SECURITY_DESCRIPTOR)
+ if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) {
+ panic("sizeof(SECURITY_DESCRIPTOR) too small")
+ }
}
var (
dacl *ACL
@@ -1312,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
group *SID
)
if daclSize > 0 {
- dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0]))
+ dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize))))
}
if saclSize > 0 {
- sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0]))
+ sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize))))
}
if ownerSize > 0 {
- owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0]))
+ owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize))))
}
if groupSize > 0 {
- group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0]))
+ group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize))))
}
+	// We call into Windows via makeAbsoluteSD, which sets up pointers within
+	// absoluteSD that point to other chunks of memory we pass in, and that
+	// happens outside the view of the GC. We therefore take some care here to
+	// verify that the pointers are what we expect and to set them again
+	// explicitly, in view of the GC. See https://go.dev/issue/73199.
+	// TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575.
err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize,
dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize)
+ if err != nil {
+ // Don't return absoluteSD, which might be partially initialized.
+ return nil, err
+ }
+ // Before using any fields, verify absoluteSD is in the format we expect according to Windows.
+ // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors
+ absControl, _, err := absoluteSD.Control()
+ if err != nil {
+ panic("absoluteSD: " + err.Error())
+ }
+ if absControl&SE_SELF_RELATIVE != 0 {
+ panic("absoluteSD not in absolute format")
+ }
+ if absoluteSD.dacl != dacl {
+ panic("dacl pointer mismatch")
+ }
+ if absoluteSD.sacl != sacl {
+ panic("sacl pointer mismatch")
+ }
+ if absoluteSD.owner != owner {
+ panic("owner pointer mismatch")
+ }
+ if absoluteSD.group != group {
+ panic("group pointer mismatch")
+ }
+ absoluteSD.dacl = dacl
+ absoluteSD.sacl = sacl
+ absoluteSD.owner = owner
+ absoluteSD.group = group
+
return
}
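
A short sketch of the call path this change hardens (Windows-only; the SDDL string is an arbitrary example, not taken from this diff):

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Build a self-relative descriptor from SDDL, then convert it to
	// absolute format via the hardened ToAbsolute.
	selfRel, err := windows.SecurityDescriptorFromString("O:BAG:BAD:(A;;GA;;;SY)")
	if err != nil {
		panic(err)
	}
	abs, err := selfRel.ToAbsolute() // now re-verifies the pointers makeAbsoluteSD set
	if err != nil {
		panic(err)
	}
	ctrl, _, err := abs.Control()
	if err != nil {
		panic(err)
	}
	fmt.Println("self-relative bit clear:", ctrl&windows.SE_SELF_RELATIVE == 0)
}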
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 4a3254386..640f6b153 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -870,6 +870,7 @@ const socket_error = uintptr(^uint32(0))
//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom
//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo
//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW
+//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW
//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname
//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname
//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs
@@ -1698,8 +1699,9 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
func (s *NTUnicodeString) Slice() []uint16 {
- slice := unsafe.Slice(s.Buffer, s.MaximumLength)
- return slice[:s.Length]
+ // Note: this rounds the length down, if it happens
+ // to (incorrectly) be odd. Probably safer than rounding up.
+ return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2]
}
func (s *NTUnicodeString) String() string {
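
A minimal sketch of the corrected Slice behavior, assuming a build of golang.org/x/sys/windows with this fix: because Length and MaximumLength count bytes, Slice must halve them to get uint16 code units.

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	u, err := windows.NewNTUnicodeString("héllo")
	if err != nil {
		panic(err)
	}
	// Length counts bytes, so the slice of uint16 code units is half as long.
	s := u.Slice()
	fmt.Println(len(s) == int(u.Length)/2) // true
	fmt.Println(windows.UTF16ToString(s))  // héllo
}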
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index ad67df2fd..958bcf47a 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -2700,6 +2700,8 @@ type CommTimeouts struct {
// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING.
type NTUnicodeString struct {
+ // Note: Length and MaximumLength are in *bytes*, not uint16s.
+ // They should always be even.
Length uint16
MaximumLength uint16
Buffer *uint16
@@ -3628,3 +3630,213 @@ const (
KLF_NOTELLSHELL = 0x00000080
KLF_SETFORPROCESS = 0x00000100
)
+
+// Virtual Key codes
+// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
+const (
+ VK_LBUTTON = 0x01
+ VK_RBUTTON = 0x02
+ VK_CANCEL = 0x03
+ VK_MBUTTON = 0x04
+ VK_XBUTTON1 = 0x05
+ VK_XBUTTON2 = 0x06
+ VK_BACK = 0x08
+ VK_TAB = 0x09
+ VK_CLEAR = 0x0C
+ VK_RETURN = 0x0D
+ VK_SHIFT = 0x10
+ VK_CONTROL = 0x11
+ VK_MENU = 0x12
+ VK_PAUSE = 0x13
+ VK_CAPITAL = 0x14
+ VK_KANA = 0x15
+ VK_HANGEUL = 0x15
+ VK_HANGUL = 0x15
+ VK_IME_ON = 0x16
+ VK_JUNJA = 0x17
+ VK_FINAL = 0x18
+ VK_HANJA = 0x19
+ VK_KANJI = 0x19
+ VK_IME_OFF = 0x1A
+ VK_ESCAPE = 0x1B
+ VK_CONVERT = 0x1C
+ VK_NONCONVERT = 0x1D
+ VK_ACCEPT = 0x1E
+ VK_MODECHANGE = 0x1F
+ VK_SPACE = 0x20
+ VK_PRIOR = 0x21
+ VK_NEXT = 0x22
+ VK_END = 0x23
+ VK_HOME = 0x24
+ VK_LEFT = 0x25
+ VK_UP = 0x26
+ VK_RIGHT = 0x27
+ VK_DOWN = 0x28
+ VK_SELECT = 0x29
+ VK_PRINT = 0x2A
+ VK_EXECUTE = 0x2B
+ VK_SNAPSHOT = 0x2C
+ VK_INSERT = 0x2D
+ VK_DELETE = 0x2E
+ VK_HELP = 0x2F
+ VK_LWIN = 0x5B
+ VK_RWIN = 0x5C
+ VK_APPS = 0x5D
+ VK_SLEEP = 0x5F
+ VK_NUMPAD0 = 0x60
+ VK_NUMPAD1 = 0x61
+ VK_NUMPAD2 = 0x62
+ VK_NUMPAD3 = 0x63
+ VK_NUMPAD4 = 0x64
+ VK_NUMPAD5 = 0x65
+ VK_NUMPAD6 = 0x66
+ VK_NUMPAD7 = 0x67
+ VK_NUMPAD8 = 0x68
+ VK_NUMPAD9 = 0x69
+ VK_MULTIPLY = 0x6A
+ VK_ADD = 0x6B
+ VK_SEPARATOR = 0x6C
+ VK_SUBTRACT = 0x6D
+ VK_DECIMAL = 0x6E
+ VK_DIVIDE = 0x6F
+ VK_F1 = 0x70
+ VK_F2 = 0x71
+ VK_F3 = 0x72
+ VK_F4 = 0x73
+ VK_F5 = 0x74
+ VK_F6 = 0x75
+ VK_F7 = 0x76
+ VK_F8 = 0x77
+ VK_F9 = 0x78
+ VK_F10 = 0x79
+ VK_F11 = 0x7A
+ VK_F12 = 0x7B
+ VK_F13 = 0x7C
+ VK_F14 = 0x7D
+ VK_F15 = 0x7E
+ VK_F16 = 0x7F
+ VK_F17 = 0x80
+ VK_F18 = 0x81
+ VK_F19 = 0x82
+ VK_F20 = 0x83
+ VK_F21 = 0x84
+ VK_F22 = 0x85
+ VK_F23 = 0x86
+ VK_F24 = 0x87
+ VK_NUMLOCK = 0x90
+ VK_SCROLL = 0x91
+ VK_OEM_NEC_EQUAL = 0x92
+ VK_OEM_FJ_JISHO = 0x92
+ VK_OEM_FJ_MASSHOU = 0x93
+ VK_OEM_FJ_TOUROKU = 0x94
+ VK_OEM_FJ_LOYA = 0x95
+ VK_OEM_FJ_ROYA = 0x96
+ VK_LSHIFT = 0xA0
+ VK_RSHIFT = 0xA1
+ VK_LCONTROL = 0xA2
+ VK_RCONTROL = 0xA3
+ VK_LMENU = 0xA4
+ VK_RMENU = 0xA5
+ VK_BROWSER_BACK = 0xA6
+ VK_BROWSER_FORWARD = 0xA7
+ VK_BROWSER_REFRESH = 0xA8
+ VK_BROWSER_STOP = 0xA9
+ VK_BROWSER_SEARCH = 0xAA
+ VK_BROWSER_FAVORITES = 0xAB
+ VK_BROWSER_HOME = 0xAC
+ VK_VOLUME_MUTE = 0xAD
+ VK_VOLUME_DOWN = 0xAE
+ VK_VOLUME_UP = 0xAF
+ VK_MEDIA_NEXT_TRACK = 0xB0
+ VK_MEDIA_PREV_TRACK = 0xB1
+ VK_MEDIA_STOP = 0xB2
+ VK_MEDIA_PLAY_PAUSE = 0xB3
+ VK_LAUNCH_MAIL = 0xB4
+ VK_LAUNCH_MEDIA_SELECT = 0xB5
+ VK_LAUNCH_APP1 = 0xB6
+ VK_LAUNCH_APP2 = 0xB7
+ VK_OEM_1 = 0xBA
+ VK_OEM_PLUS = 0xBB
+ VK_OEM_COMMA = 0xBC
+ VK_OEM_MINUS = 0xBD
+ VK_OEM_PERIOD = 0xBE
+ VK_OEM_2 = 0xBF
+ VK_OEM_3 = 0xC0
+ VK_OEM_4 = 0xDB
+ VK_OEM_5 = 0xDC
+ VK_OEM_6 = 0xDD
+ VK_OEM_7 = 0xDE
+ VK_OEM_8 = 0xDF
+ VK_OEM_AX = 0xE1
+ VK_OEM_102 = 0xE2
+ VK_ICO_HELP = 0xE3
+ VK_ICO_00 = 0xE4
+ VK_PROCESSKEY = 0xE5
+ VK_ICO_CLEAR = 0xE6
+ VK_OEM_RESET = 0xE9
+ VK_OEM_JUMP = 0xEA
+ VK_OEM_PA1 = 0xEB
+ VK_OEM_PA2 = 0xEC
+ VK_OEM_PA3 = 0xED
+ VK_OEM_WSCTRL = 0xEE
+ VK_OEM_CUSEL = 0xEF
+ VK_OEM_ATTN = 0xF0
+ VK_OEM_FINISH = 0xF1
+ VK_OEM_COPY = 0xF2
+ VK_OEM_AUTO = 0xF3
+ VK_OEM_ENLW = 0xF4
+ VK_OEM_BACKTAB = 0xF5
+ VK_ATTN = 0xF6
+ VK_CRSEL = 0xF7
+ VK_EXSEL = 0xF8
+ VK_EREOF = 0xF9
+ VK_PLAY = 0xFA
+ VK_ZOOM = 0xFB
+ VK_NONAME = 0xFC
+ VK_PA1 = 0xFD
+ VK_OEM_CLEAR = 0xFE
+)
+
+// Mouse button constants.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001
+ RIGHTMOST_BUTTON_PRESSED = 0x0002
+ FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004
+ FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008
+ FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010
+)
+
+// Control key state constants.
+// https://docs.microsoft.com/en-us/windows/console/key-event-record-str
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ CAPSLOCK_ON = 0x0080
+ ENHANCED_KEY = 0x0100
+ LEFT_ALT_PRESSED = 0x0002
+ LEFT_CTRL_PRESSED = 0x0008
+ NUMLOCK_ON = 0x0020
+ RIGHT_ALT_PRESSED = 0x0001
+ RIGHT_CTRL_PRESSED = 0x0004
+ SCROLLLOCK_ON = 0x0040
+ SHIFT_PRESSED = 0x0010
+)
+
+// Mouse event record event flags.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ MOUSE_MOVED = 0x0001
+ DOUBLE_CLICK = 0x0002
+ MOUSE_WHEELED = 0x0004
+ MOUSE_HWHEELED = 0x0008
+)
+
+// Input Record Event Types
+// https://learn.microsoft.com/en-us/windows/console/input-record-str
+const (
+ FOCUS_EVENT = 0x0010
+ KEY_EVENT = 0x0001
+ MENU_EVENT = 0x0008
+ MOUSE_EVENT = 0x0002
+ WINDOW_BUFFER_SIZE_EVENT = 0x0004
+)
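
Since the control-key state word is a bitmask, the new constants compose with AND; a minimal sketch (describeKeyState is a hypothetical helper, and this assumes a build of x/sys/windows that exports these constants):

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// describeKeyState decodes a control-key state bitmask, as found in a
// console KEY_EVENT or MOUSE_EVENT record, by testing each flag with AND.
func describeKeyState(state uint32) []string {
	var out []string
	if state&windows.SHIFT_PRESSED != 0 {
		out = append(out, "shift")
	}
	if state&(windows.LEFT_CTRL_PRESSED|windows.RIGHT_CTRL_PRESSED) != 0 {
		out = append(out, "ctrl")
	}
	if state&(windows.LEFT_ALT_PRESSED|windows.RIGHT_ALT_PRESSED) != 0 {
		out = append(out, "alt")
	}
	if state&windows.CAPSLOCK_ON != 0 {
		out = append(out, "capslock")
	}
	return out
}

func main() {
	fmt.Println(describeKeyState(windows.SHIFT_PRESSED | windows.LEFT_CTRL_PRESSED))
	// [shift ctrl]
}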
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 01c0716c2..a58bc48b8 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -511,6 +511,7 @@ var (
procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
procWSACleanup = modws2_32.NewProc("WSACleanup")
+ procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW")
procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procWSAIoctl = modws2_32.NewProc("WSAIoctl")
@@ -4391,6 +4392,14 @@ func WSACleanup() (err error) {
return
}
+func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
+ r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+ if r1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
n = int32(r0)
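
A hedged sketch of the parent/child handoff the new WSADuplicateSocket wrapper enables. The socket handle, child PID, and the IPC channel carrying the protocol info are placeholders, and fromProtocolInfo is defined locally because x/sys/windows does not appear to export WinSock's FROM_PROTOCOL_INFO (-1):

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// fromProtocolInfo is WinSock's FROM_PROTOCOL_INFO value (-1); defining it
// here is an assumption, as x/sys/windows does not appear to export it.
const fromProtocolInfo = -1

func main() {
	var (
		s        windows.Handle        // placeholder: a real socket handle
		childPID uint32         = 1234 // placeholder: PID of the receiving process
	)

	// Parent: describe s for the child process.
	var info windows.WSAProtocolInfo
	if err := windows.WSADuplicateSocket(s, childPID, &info); err != nil {
		fmt.Println("WSADuplicateSocket:", err)
		return
	}

	// Child (after receiving info over some IPC channel): adopt the socket.
	h, err := windows.WSASocket(fromProtocolInfo, fromProtocolInfo, fromProtocolInfo,
		&info, 0, windows.WSA_FLAG_OVERLAPPED)
	if err != nil {
		fmt.Println("WSASocket:", err)
		return
	}
	defer windows.Closesocket(h)
}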
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index 14f89470a..13e9a64ad 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -6,6 +6,7 @@ package term
import (
"bytes"
+ "fmt"
"io"
"runtime"
"strconv"
@@ -36,6 +37,26 @@ var vt100EscapeCodes = EscapeCodes{
Reset: []byte{keyEscape, '[', '0', 'm'},
}
+// A History provides a (possibly bounded) queue of input lines read by [Terminal.ReadLine].
+type History interface {
+ // Add will be called by [Terminal.ReadLine] to add
+ // a new, most recent entry to the history.
+ // It is allowed to drop any entry, including
+ // the entry being added (e.g., if it's deemed an invalid entry),
+ // the least-recent entry (e.g., to keep the history bounded),
+ // or any other entry.
+ Add(entry string)
+
+ // Len returns the number of entries in the history.
+ Len() int
+
+ // At returns an entry from the history.
+ // Index 0 is the most-recently added entry and
+ // index Len()-1 is the least-recently added entry.
+ // If index is < 0 or >= Len(), it panics.
+ At(idx int) string
+}
+
// Terminal contains the state for running a VT100 terminal that is capable of
// reading lines of input.
type Terminal struct {
@@ -86,9 +107,14 @@ type Terminal struct {
remainder []byte
inBuf [256]byte
- // history contains previously entered commands so that they can be
- // accessed with the up and down keys.
- history stRingBuffer
+	// History records lines of input read by [ReadLine], which the user
+	// can recall and navigate using the up and down arrow keys.
+ //
+ // It is not safe to call ReadLine concurrently with any methods on History.
+ //
+ // [NewTerminal] sets this to a default implementation that records the
+ // last 100 lines of input.
+ History History
// historyIndex stores the currently accessed history entry, where zero
// means the immediately previous entry.
historyIndex int
@@ -111,6 +137,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
termHeight: 24,
echo: true,
historyIndex: -1,
+ History: &stRingBuffer{},
}
}
@@ -450,6 +477,23 @@ func visualLength(runes []rune) int {
return length
}
+// historyAt unlocks the terminal and relocks it while calling History.At.
+func (t *Terminal) historyAt(idx int) (string, bool) {
+	t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer.
+	defer t.lock.Lock() // Relock even if At (or Len) panics.
+ if idx < 0 || idx >= t.History.Len() {
+ return "", false
+ }
+ return t.History.At(idx), true
+}
+
+// historyAdd unlocks the terminal and relocks it while calling History.Add.
+func (t *Terminal) historyAdd(entry string) {
+ t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer.
+	defer t.lock.Lock() // Relock even if Add panics.
+ t.History.Add(entry)
+}
+
// handleKey processes the given key and, optionally, returns a line of text
// that the user has entered.
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
@@ -497,7 +541,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
t.pos = len(t.line)
t.moveCursorToPos(t.pos)
case keyUp:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
+ entry, ok := t.historyAt(t.historyIndex + 1)
if !ok {
return "", false
}
@@ -516,7 +560,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
t.setLine(runes, len(runes))
t.historyIndex--
default:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
+ entry, ok := t.historyAt(t.historyIndex - 1)
if ok {
t.historyIndex--
runes := []rune(entry)
@@ -781,7 +825,7 @@ func (t *Terminal) readLine() (line string, err error) {
if lineOk {
if t.echo {
t.historyIndex = -1
- t.history.Add(line)
+ t.historyAdd(line)
}
if lineIsPasted {
err = ErrPasteIndicator
@@ -938,19 +982,23 @@ func (s *stRingBuffer) Add(a string) {
}
}
-// NthPreviousEntry returns the value passed to the nth previous call to Add.
+func (s *stRingBuffer) Len() int {
+ return s.size
+}
+
+// At returns the value passed to the nth previous call to Add.
// If n is zero then the immediately prior value is returned, if one, then the
// next most recent, and so on. If no such element exists (n is out of
// range), At panics.
-func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
+func (s *stRingBuffer) At(n int) string {
if n < 0 || n >= s.size {
- return "", false
+ panic(fmt.Sprintf("term: history index [%d] out of range [0,%d)", n, s.size))
}
index := s.head - n
if index < 0 {
index += s.max
}
- return s.entries[index], true
+ return s.entries[index]
}
// readPasswordLine reads from reader until it finds \n or io.EOF.
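
A minimal sketch of a custom History implementation against the new interface (dedupHistory is a hypothetical type, not part of x/term): it drops consecutive duplicate lines, something the default ring buffer does not do.

package main

import (
	"os"

	"golang.org/x/term"
)

// dedupHistory implements term.History and skips consecutive duplicates.
type dedupHistory struct {
	entries []string // entries[0] is the oldest line
}

func (h *dedupHistory) Add(entry string) {
	if n := len(h.entries); n > 0 && h.entries[n-1] == entry {
		return // drop consecutive duplicates
	}
	h.entries = append(h.entries, entry)
}

func (h *dedupHistory) Len() int { return len(h.entries) }

// At maps index 0 to the most recent entry and Len()-1 to the oldest;
// an out-of-range idx panics via slice indexing, as the interface requires.
func (h *dedupHistory) At(idx int) string {
	return h.entries[len(h.entries)-1-idx]
}

func main() {
	t := term.NewTerminal(os.Stdin, "> ")
	t.History = &dedupHistory{} // replace the default 100-line ring buffer
}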
diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS
deleted file mode 100644
index f07029059..000000000
--- a/vendor/google.golang.org/api/AUTHORS
+++ /dev/null
@@ -1,11 +0,0 @@
-# This is the official list of authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-
-# Names should be added to this file as
-# Name or Organization
-# The email address is not required for organizations.
-
-# Please keep the list sorted.
-Google Inc.
-LightStep Inc.
diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS
deleted file mode 100644
index 788677b8f..000000000
--- a/vendor/google.golang.org/api/CONTRIBUTORS
+++ /dev/null
@@ -1,56 +0,0 @@
-# This is the official list of people who can contribute
-# (and typically have contributed) code to the repository.
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, Google employees are listed here
-# but not in AUTHORS, because Google holds the copyright.
-#
-# The submission process automatically checks to make sure
-# that people submitting code are listed in this file (by email address).
-#
-# Names should be added to this file only after verifying that
-# the individual or the individual's organization has agreed to
-# the appropriate Contributor License Agreement, found here:
-#
-# https://cla.developers.google.com/about/google-individual
-# https://cla.developers.google.com/about/google-corporate
-#
-# The CLA can be filled out on the web:
-#
-# https://cla.developers.google.com/
-#
-# When adding J Random Contributor's name to this file,
-# either J's name or J's organization's name should be
-# added to the AUTHORS file, depending on whether the
-# individual or corporate CLA was used.
-
-# Names should be added to this file like so:
-# Name
-#
-# An entry with two email addresses specifies that the
-# first address should be used in the submit logs and
-# that the second address should be recognized as the
-# same person when interacting with Rietveld.
-
-# Please keep the list sorted.
-
-Alain Vongsouvanhalainv
-Andrew Gerrand
-Brad Fitzpatrick
-Eric Koleda
-Francesc Campoy
-Garrick Evans
-Glenn Lewis
-Ivan Krasin
-Jason Hall
-Johan Euphrosine
-Kostik Shtoyk
-Kunpei Sakai
-Matthew Dolan
-Matthew Whisenhunt
-Michael McGreevy
-Nick Craig-Wood
-Robbie Trencheny
-Ross Light
-Sarah Adams
-Scott Van Woudenberg
-Takashi Matsuo
diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE
deleted file mode 100644
index 263aa7a0c..000000000
--- a/vendor/google.golang.org/api/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2011 Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go
deleted file mode 100644
index 9f8237a03..000000000
--- a/vendor/google.golang.org/api/support/bundler/bundler.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2016 Google LLC.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bundler supports bundling (batching) of items. Bundling amortizes an
-// action with fixed costs over multiple items. For example, if an API provides
-// an RPC that accepts a list of items as input, but clients would prefer
-// adding items one at a time, then a Bundler can accept individual items from
-// the client and bundle many of them into a single RPC.
-//
-// This package is experimental and subject to change without notice.
-package bundler
-
-import (
- "context"
- "errors"
- "reflect"
- "sync"
- "time"
-
- "golang.org/x/sync/semaphore"
-)
-
-type mode int
-
-const (
- DefaultDelayThreshold = time.Second
- DefaultBundleCountThreshold = 10
- DefaultBundleByteThreshold = 1e6 // 1M
- DefaultBufferedByteLimit = 1e9 // 1G
-)
-
-const (
- none mode = iota
- add
- addWait
-)
-
-var (
- // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.
- ErrOverflow = errors.New("bundler reached buffered byte limit")
-
- // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.
- ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
-
- // errMixedMethods indicates that mutually exclusive methods has been
- // called subsequently.
- errMixedMethods = errors.New("calls to Add and AddWait cannot be mixed")
-)
-
-// A Bundler collects items added to it into a bundle until the bundle
-// exceeds a given size, then calls a user-provided function to handle the
-// bundle.
-//
-// The exported fields are only safe to modify prior to the first call to Add
-// or AddWait.
-type Bundler struct {
- // Starting from the time that the first message is added to a bundle, once
- // this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
- DelayThreshold time.Duration
-
- // Once a bundle has this many items, handle the bundle. Since only one
- // item at a time is added to a bundle, no bundle will exceed this
- // threshold, so it also serves as a limit. The default is
- // DefaultBundleCountThreshold.
- BundleCountThreshold int
-
- // Once the number of bytes in current bundle reaches this threshold, handle
- // the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
- // but does not cap the total size of a bundle.
- BundleByteThreshold int
-
- // The maximum size of a bundle, in bytes. Zero means unlimited.
- BundleByteLimit int
-
- // The maximum number of bytes that the Bundler will keep in memory before
- // returning ErrOverflow. The default is DefaultBufferedByteLimit.
- BufferedByteLimit int
-
- // The maximum number of handler invocations that can be running at once.
- // The default is 1.
- HandlerLimit int
-
- handler func(interface{}) // called to handle a bundle
- itemSliceZero reflect.Value // nil (zero value) for slice of items
-
- mu sync.Mutex // guards access to fields below
- flushTimer *time.Timer // implements DelayThreshold
- handlerCount int // # of bundles currently being handled (i.e. handler is invoked on them)
- sem *semaphore.Weighted // enforces BufferedByteLimit
- semOnce sync.Once // guards semaphore initialization
- // The current bundle we're adding items to. Not yet in the queue.
- // Appended to the queue once the flushTimer fires or the bundle
- // thresholds/limits are reached. If curBundle is nil and tail is
- // not, we first try to add items to tail. Once tail is full or handled,
- // we create a new curBundle for the incoming item.
- curBundle *bundle
- // The next bundle in the queue to be handled. Nil if the queue is
- // empty.
- head *bundle
- // The last bundle in the queue to be handled. Nil if the queue is
- // empty. If curBundle is nil and tail isn't, we attempt to add new
- // items to the tail until if becomes full or has been passed to the
- // handler.
- tail *bundle
- curFlush *sync.WaitGroup // counts outstanding bundles since last flush
- prevFlush chan bool // signal used to wait for prior flush
-
- // The first call to Add or AddWait, mode will be add or addWait respectively.
- // If there wasn't call yet then mode is none.
- mode mode
- // TODO: consider alternative queue implementation for head/tail bundle. see:
- // https://code-review.googlesource.com/c/google-api-go-client/+/47991/4/support/bundler/bundler.go#74
-}
-
-// A bundle is a group of items that were added individually and will be passed
-// to a handler as a slice.
-type bundle struct {
- items reflect.Value // slice of T
- size int // size in bytes of all items
- next *bundle // bundles are handled in order as a linked list queue
- flush *sync.WaitGroup // the counter that tracks flush completion
-}
-
-// add appends item to this bundle and increments the total size. It requires
-// that b.mu is locked.
-func (bu *bundle) add(item interface{}, size int) {
- bu.items = reflect.Append(bu.items, reflect.ValueOf(item))
- bu.size += size
-}
-
-// NewBundler creates a new Bundler.
-//
-// itemExample is a value of the type that will be bundled. For example, if you
-// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
-//
-// handler is a function that will be called on each bundle. If itemExample is
-// of type T, the argument to handler is of type []T. handler is always called
-// sequentially for each bundle, and never in parallel.
-//
-// Configure the Bundler by setting its thresholds and limits before calling
-// any of its methods.
-func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
- b := &Bundler{
- DelayThreshold: DefaultDelayThreshold,
- BundleCountThreshold: DefaultBundleCountThreshold,
- BundleByteThreshold: DefaultBundleByteThreshold,
- BufferedByteLimit: DefaultBufferedByteLimit,
- HandlerLimit: 1,
-
- handler: handler,
- itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
- curFlush: &sync.WaitGroup{},
- }
- return b
-}
-
-func (b *Bundler) initSemaphores() {
- // Create the semaphores lazily, because the user may set limits
- // after NewBundler.
- b.semOnce.Do(func() {
- b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit))
- })
-}
-
-// enqueueCurBundle moves curBundle to the end of the queue. The bundle may be
-// handled immediately if we are below HandlerLimit. It requires that b.mu is
-// locked.
-func (b *Bundler) enqueueCurBundle() {
- // We don't require callers to check if there is a pending bundle. It
- // may have already been appended to the queue. If so, return early.
- if b.curBundle == nil {
- return
- }
- // If we are below the HandlerLimit, the queue must be empty. Handle
- // immediately with a new goroutine.
- if b.handlerCount < b.HandlerLimit {
- b.handlerCount++
- go b.handle(b.curBundle)
- } else if b.tail != nil {
- // There are bundles on the queue, so append to the end
- b.tail.next = b.curBundle
- b.tail = b.curBundle
- } else {
- // The queue is empty, so initialize the queue
- b.head = b.curBundle
- b.tail = b.curBundle
- }
- b.curBundle = nil
- if b.flushTimer != nil {
- b.flushTimer.Stop()
- b.flushTimer = nil
- }
-}
-
-// setMode sets the state of Bundler's mode. If mode was defined before
-// and passed state is different from it then return an error.
-func (b *Bundler) setMode(m mode) error {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.mode == m || b.mode == none {
- b.mode = m
- return nil
- }
- return errMixedMethods
-}
-
-// canFit returns true if bu can fit an additional item of size bytes based
-// on the limits of Bundler b.
-func (b *Bundler) canFit(bu *bundle, size int) bool {
- return (b.BundleByteLimit <= 0 || bu.size+size <= b.BundleByteLimit) &&
- (b.BundleCountThreshold <= 0 || bu.items.Len() < b.BundleCountThreshold)
-}
-
-// Add adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-// The type of item must be assignable to the itemExample parameter of the NewBundler
-// method, otherwise there will be a panic.
-//
-// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
-// the item can never be handled. Add returns ErrOversizedItem in this case.
-//
-// If adding the item would exceed the maximum memory allowed
-// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for
-// memory, Add returns ErrOverflow.
-//
-// Add never blocks.
-func (b *Bundler) Add(item interface{}, size int) error {
- if err := b.setMode(add); err != nil {
- return err
- }
- // If this item exceeds the maximum size of a bundle,
- // we can never send it.
- if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
- return ErrOversizedItem
- }
-
- // If adding this item would exceed our allotted memory
- // footprint, we can't accept it.
- // (TryAcquire also returns false if anything is waiting on the semaphore,
- // so calls to Add and AddWait shouldn't be mixed.)
- b.initSemaphores()
- if !b.sem.TryAcquire(int64(size)) {
- return ErrOverflow
- }
-
- b.mu.Lock()
- defer b.mu.Unlock()
- return b.add(item, size)
-}
-
-// add adds item to the tail of the bundle queue or curBundle depending on space
-// and nil-ness (see inline comments). It marks curBundle for handling (by
-// appending it to the queue) if any of the thresholds or limits are exceeded.
-// curBundle is lazily initialized. It requires that b.mu is locked.
-func (b *Bundler) add(item interface{}, size int) error {
- // If we don't have a curBundle, see if we can add to the queue tail.
- if b.tail != nil && b.curBundle == nil && b.canFit(b.tail, size) {
- b.tail.add(item, size)
- return nil
- }
-
- // If we can't fit in the existing curBundle, move it onto the queue.
- if b.curBundle != nil && !b.canFit(b.curBundle, size) {
- b.enqueueCurBundle()
- }
-
- // Create a curBundle if we don't have one.
- if b.curBundle == nil {
- b.curFlush.Add(1)
- b.curBundle = &bundle{
- items: b.itemSliceZero,
- flush: b.curFlush,
- }
- }
-
- // Add the item.
- b.curBundle.add(item, size)
-
- // If curBundle is ready for handling, move it to the queue.
- if b.curBundle.size >= b.BundleByteThreshold ||
- b.curBundle.items.Len() == b.BundleCountThreshold {
- b.enqueueCurBundle()
- }
-
- // If we created a new bundle and it wasn't immediately handled, set a timer
- if b.curBundle != nil && b.flushTimer == nil {
- b.flushTimer = time.AfterFunc(b.DelayThreshold, b.tryHandleBundles)
- }
-
- return nil
-}
-
-// tryHandleBundles is the timer callback that handles or queues any current
-// bundle after DelayThreshold time, even if the bundle isn't completely full.
-func (b *Bundler) tryHandleBundles() {
- b.mu.Lock()
- b.enqueueCurBundle()
- b.mu.Unlock()
-}
-
-// next returns the next bundle that is ready for handling and removes it from
-// the internal queue. It requires that b.mu is locked.
-func (b *Bundler) next() *bundle {
- if b.head == nil {
- return nil
- }
- out := b.head
- b.head = b.head.next
- if b.head == nil {
- b.tail = nil
- }
- out.next = nil
- return out
-}
-
-// handle calls the user-specified handler on the given bundle. handle is
-// intended to be run as a goroutine. After the handler returns, we update the
-// byte total. handle continues processing additional bundles that are ready.
-// If no more bundles are ready, the handler count is decremented and the
-// goroutine ends.
-func (b *Bundler) handle(bu *bundle) {
- for bu != nil {
- b.handler(bu.items.Interface())
- bu = b.postHandle(bu)
- }
-}
-
-func (b *Bundler) postHandle(bu *bundle) *bundle {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- b.sem.Release(int64(bu.size))
- bu.flush.Done()
-
- bu = b.next()
- if bu == nil {
- b.handlerCount--
- }
- return bu
-}
-
-// AddWait adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-//
-// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
-// the item can never be handled. AddWait returns ErrOversizedItem in this case.
-//
-// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),
-// AddWait blocks until space is available or ctx is done.
-//
-// Calls to Add and AddWait should not be mixed on the same Bundler.
-func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error {
- if err := b.setMode(addWait); err != nil {
- return err
- }
- // If this item exceeds the maximum size of a bundle,
- // we can never send it.
- if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
- return ErrOversizedItem
- }
- // If adding this item would exceed our allotted memory footprint, block
- // until space is available. The semaphore is FIFO, so there will be no
- // starvation.
- b.initSemaphores()
- if err := b.sem.Acquire(ctx, int64(size)); err != nil {
- return err
- }
-
- b.mu.Lock()
- defer b.mu.Unlock()
- return b.add(item, size)
-}
-
-// Flush invokes the handler for all remaining items in the Bundler and waits
-// for it to return.
-func (b *Bundler) Flush() {
- b.mu.Lock()
-
- // If a curBundle is pending, move it to the queue.
- b.enqueueCurBundle()
-
- // Store a pointer to the WaitGroup that counts outstanding bundles
- // in the current flush and create a new one to track the next flush.
- wg := b.curFlush
- b.curFlush = &sync.WaitGroup{}
-
- // Flush must wait for all prior, outstanding flushes to complete.
- // We use a channel to communicate completion between each flush in
- // the sequence.
- prev := b.prevFlush
- next := make(chan bool)
- b.prevFlush = next
-
- b.mu.Unlock()
-
- // Wait until the previous flush is finished.
- if prev != nil {
- <-prev
- }
-
- // Wait until this flush is finished.
- wg.Wait()
-
- // Allow the next flush to finish.
- close(next)
-}
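
For reference, the bundler removed above remains available upstream as google.golang.org/api/support/bundler; a minimal sketch of how it is typically driven:

package main

import (
	"fmt"

	"google.golang.org/api/support/bundler"
)

func main() {
	// The handler receives a []int because the item example below is an int.
	b := bundler.NewBundler(int(0), func(items interface{}) {
		fmt.Println("handling bundle:", items.([]int))
	})
	b.BundleCountThreshold = 3 // hand a bundle off after every 3 items

	for i := 1; i <= 7; i++ {
		if err := b.Add(i, 1); err != nil { // each item counts as 1 byte
			fmt.Println("add failed:", err)
		}
	}
	b.Flush() // handle the remaining partial bundle and wait for all handlers
}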
diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/google.golang.org/genproto/googleapis/api/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
deleted file mode 100644
index f388426b0..000000000
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.24.4
-// source: google/api/httpbody.proto
-
-package httpbody
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- anypb "google.golang.org/protobuf/types/known/anypb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Message that represents an arbitrary HTTP body. It should only be used for
-// payload formats that can't be represented as JSON, such as raw binary or
-// an HTML page.
-//
-// This message can be used both in streaming and non-streaming API methods in
-// the request as well as the response.
-//
-// It can be used as a top-level request field, which is convenient if one
-// wants to extract parameters from either the URL or HTTP template into the
-// request fields and also want access to the raw HTTP body.
-//
-// Example:
-//
-// message GetResourceRequest {
-// // A unique request id.
-// string request_id = 1;
-//
-// // The raw HTTP body is bound to this field.
-// google.api.HttpBody http_body = 2;
-//
-// }
-//
-// service ResourceService {
-// rpc GetResource(GetResourceRequest)
-// returns (google.api.HttpBody);
-// rpc UpdateResource(google.api.HttpBody)
-// returns (google.protobuf.Empty);
-//
-// }
-//
-// Example with streaming methods:
-//
-// service CaldavService {
-// rpc GetCalendar(stream google.api.HttpBody)
-// returns (stream google.api.HttpBody);
-// rpc UpdateCalendar(stream google.api.HttpBody)
-// returns (stream google.api.HttpBody);
-//
-// }
-//
-// Use of this type only changes how the request and response bodies are
-// handled, all other features will continue to work unchanged.
-type HttpBody struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The HTTP Content-Type header value specifying the content type of the body.
- ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
- // The HTTP request/response body as raw binary.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- // Application specific response metadata. Must be set in the first response
- // for streaming APIs.
- Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
-}
-
-func (x *HttpBody) Reset() {
- *x = HttpBody{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_api_httpbody_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HttpBody) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HttpBody) ProtoMessage() {}
-
-func (x *HttpBody) ProtoReflect() protoreflect.Message {
- mi := &file_google_api_httpbody_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead.
-func (*HttpBody) Descriptor() ([]byte, []int) {
- return file_google_api_httpbody_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *HttpBody) GetContentType() string {
- if x != nil {
- return x.ContentType
- }
- return ""
-}
-
-func (x *HttpBody) GetData() []byte {
- if x != nil {
- return x.Data
- }
- return nil
-}
-
-func (x *HttpBody) GetExtensions() []*anypb.Any {
- if x != nil {
- return x.Extensions
- }
- return nil
-}
-
-var File_google_api_httpbody_proto protoreflect.FileDescriptor
-
-var file_google_api_httpbody_proto_rawDesc = []byte{
- 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
- 0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
- 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
- 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63,
- 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
- 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
- 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
- 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41,
- 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_api_httpbody_proto_rawDescOnce sync.Once
- file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc
-)
-
-func file_google_api_httpbody_proto_rawDescGZIP() []byte {
- file_google_api_httpbody_proto_rawDescOnce.Do(func() {
- file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData)
- })
- return file_google_api_httpbody_proto_rawDescData
-}
-
-var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_api_httpbody_proto_goTypes = []interface{}{
- (*HttpBody)(nil), // 0: google.api.HttpBody
- (*anypb.Any)(nil), // 1: google.protobuf.Any
-}
-var file_google_api_httpbody_proto_depIdxs = []int32{
- 1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_google_api_httpbody_proto_init() }
-func file_google_api_httpbody_proto_init() {
- if File_google_api_httpbody_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HttpBody); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_api_httpbody_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_api_httpbody_proto_goTypes,
- DependencyIndexes: file_google_api_httpbody_proto_depIdxs,
- MessageInfos: file_google_api_httpbody_proto_msgTypes,
- }.Build()
- File_google_api_httpbody_proto = out.File
- file_google_api_httpbody_proto_rawDesc = nil
- file_google_api_httpbody_proto_goTypes = nil
- file_google_api_httpbody_proto_depIdxs = nil
-}
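
A minimal sketch of constructing and reading an `HttpBody`, assuming the import path encoded in the raw descriptor above (`google.golang.org/genproto/googleapis/api/httpbody`); the field values are invented for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/httpbody"
)

func main() {
	body := &httpbody.HttpBody{
		ContentType: "application/octet-stream", // illustrative media type
		Data:        []byte{0xDE, 0xAD, 0xBE, 0xEF},
	}
	// The generated getters are nil-safe: called on a nil *HttpBody they
	// return the field's zero value instead of panicking.
	fmt.Println(body.GetContentType(), len(body.GetData()))
}
```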
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
deleted file mode 100644
index 6ad1b1c1d..000000000
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.24.4
-// source: google/rpc/status.proto
-
-package status
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- anypb "google.golang.org/protobuf/types/known/anypb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// The `Status` type defines a logical error model that is suitable for
-// different programming environments, including REST APIs and RPC APIs. It is
-// used by [gRPC](https://github.com/grpc). Each `Status` message contains
-// three pieces of data: error code, error message, and error details.
-//
-// You can find out more about this error model and how to work with it in the
-// [API Design Guide](https://cloud.google.com/apis/design/errors).
-type Status struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The status code, which should be an enum value of
- // [google.rpc.Code][google.rpc.Code].
- Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- // A developer-facing error message, which should be in English. Any
- // user-facing error message should be localized and sent in the
- // [google.rpc.Status.details][google.rpc.Status.details] field, or localized
- // by the client.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
- // A list of messages that carry the error details. There is a common set of
- // message types for APIs to use.
- Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
-}
-
-func (x *Status) Reset() {
- *x = Status{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_rpc_status_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Status) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Status) ProtoMessage() {}
-
-func (x *Status) ProtoReflect() protoreflect.Message {
- mi := &file_google_rpc_status_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Status.ProtoReflect.Descriptor instead.
-func (*Status) Descriptor() ([]byte, []int) {
- return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Status) GetCode() int32 {
- if x != nil {
- return x.Code
- }
- return 0
-}
-
-func (x *Status) GetMessage() string {
- if x != nil {
- return x.Message
- }
- return ""
-}
-
-func (x *Status) GetDetails() []*anypb.Any {
- if x != nil {
- return x.Details
- }
- return nil
-}
-
-var File_google_rpc_status_proto protoreflect.FileDescriptor
-
-var file_google_rpc_status_proto_rawDesc = []byte{
- 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
- 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
- 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
- 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_rpc_status_proto_rawDescOnce sync.Once
- file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
-)
-
-func file_google_rpc_status_proto_rawDescGZIP() []byte {
- file_google_rpc_status_proto_rawDescOnce.Do(func() {
- file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
- })
- return file_google_rpc_status_proto_rawDescData
-}
-
-var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_rpc_status_proto_goTypes = []interface{}{
- (*Status)(nil), // 0: google.rpc.Status
- (*anypb.Any)(nil), // 1: google.protobuf.Any
-}
-var file_google_rpc_status_proto_depIdxs = []int32{
- 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_google_rpc_status_proto_init() }
-func file_google_rpc_status_proto_init() {
- if File_google_rpc_status_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Status); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_rpc_status_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_rpc_status_proto_goTypes,
- DependencyIndexes: file_google_rpc_status_proto_depIdxs,
- MessageInfos: file_google_rpc_status_proto_msgTypes,
- }.Build()
- File_google_rpc_status_proto = out.File
- file_google_rpc_status_proto_rawDesc = nil
- file_google_rpc_status_proto_goTypes = nil
- file_google_rpc_status_proto_depIdxs = nil
-}
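
As the doc comment above says, `Status` carries an error code, a message, and typed details. A minimal sketch of building and inspecting one follows; the code value 5 is assumed to stand for NOT_FOUND per the `google.rpc.Code` enum, which this type does not itself enforce:

```go
package main

import (
	"fmt"

	spb "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	st := &spb.Status{
		Code:    5, // NOT_FOUND in google.rpc.Code (assumed; not enforced here)
		Message: "resource not found",
	}
	fmt.Printf("code=%d message=%q details=%d\n",
		st.GetCode(), st.GetMessage(), len(st.GetDetails()))
}
```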
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS
deleted file mode 100644
index e491a9e7f..000000000
--- a/vendor/google.golang.org/grpc/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
deleted file mode 100644
index 9d4213ebc..000000000
--- a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Community Code of Conduct
-
-gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
deleted file mode 100644
index d9bfa6e1e..000000000
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# How to contribute
-
-We definitely welcome your patches and contributions to gRPC! Please read the gRPC
-organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
-and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
-
-If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
-
-## Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
-
-## Guidelines for Pull Requests
-How to get your contributions merged smoothly and quickly.
-
-- Create **small PRs** that are narrowly focused on **addressing a single
-  concern**. We often receive PRs that try to fix several things at a time;
-  when only one fix is considered acceptable, nothing gets merged, and both
-  the author's and the reviewer's time is wasted. Create separate PRs to
-  address different concerns and everyone will be happy.
-
-- If you are searching for features to work on, issues labeled [Status: Help
- Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
-  are a great place to start. These issues are well-documented and usually can be
- resolved with a single pull request.
-
-- If you are adding a new file, make sure it has the copyright message template
- at the top as a comment. You can copy over the message from an existing file
- and update the year.
-
-- The grpc package should only depend on standard Go packages, with a small
-  number of exceptions. If your contribution introduces new dependencies that are
-  NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
- discussion with gRPC-Go authors and consultants.
-
-- For speculative changes, consider opening an issue and discussing it first. If
- you are suggesting a behavioral or API change, consider starting with a [gRFC
- proposal](https://github.com/grpc/proposal).
-
-- Provide a good **PR description** as a record of **what** change is being made
- and **why** it was made. Link to a GitHub issue if it exists.
-
-- If you want to fix formatting or style, consider whether your changes are an
- obvious improvement or might be considered a personal preference. If a style
- change is based on preference, it likely will not be accepted. If it corrects
- widely agreed-upon anti-patterns, then please do create a PR and explain the
- benefits of the change.
-
-- Unless your PR is trivial, you should expect there will be reviewer comments
- that you'll need to address before merging. We'll mark it as `Status: Requires
- Reporter Clarification` if we expect you to respond to these comments in a
- timely manner. If the PR remains inactive for 6 days, it will be marked as
- `stale` and automatically close 7 days after that if we don't hear back from
- you.
-
-- Maintain **clean commit history** and use **meaningful commit messages**. PRs
- with messy commit history are difficult to review and won't be merged. Use
- `rebase -i upstream/master` to curate your commit history and/or to bring in
- latest changes from master (but avoid rebasing in the middle of a code
- review).
-
-- Keep your PR up to date with upstream/master (if there are merge conflicts, we
- can't really merge your change).
-
-- **All tests need to be passing** before your change can be merged. We
- recommend you **run tests locally** before creating your PR to catch breakages
- early on.
- - `./scripts/vet.sh` to catch vet errors
- - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
- - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
-
-- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md
deleted file mode 100644
index d6ff26747..000000000
--- a/vendor/google.golang.org/grpc/GOVERNANCE.md
+++ /dev/null
@@ -1 +0,0 @@
-This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/google.golang.org/grpc/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
deleted file mode 100644
index 5d4096d46..000000000
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ /dev/null
@@ -1,36 +0,0 @@
-This page lists all active maintainers of this repository. If you were a
-maintainer and would like to add your name to the Emeritus list, please send us a
-PR.
-
-See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
-for governance guidelines and how to become a maintainer.
-See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
-for general contribution guidelines.
-
-## Maintainers (in alphabetical order)
-
-- [aranjans](https://github.com/aranjans), Google LLC
-- [arjan-bal](https://github.com/arjan-bal), Google LLC
-- [arvindbr8](https://github.com/arvindbr8), Google LLC
-- [atollena](https://github.com/atollena), Datadog, Inc.
-- [dfawley](https://github.com/dfawley), Google LLC
-- [easwars](https://github.com/easwars), Google LLC
-- [erm-g](https://github.com/erm-g), Google LLC
-- [gtcooke94](https://github.com/gtcooke94), Google LLC
-- [purnesh42h](https://github.com/purnesh42h), Google LLC
-- [zasweq](https://github.com/zasweq), Google LLC
-
-## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez)
-- [canguler](https://github.com/canguler)
-- [cesarghali](https://github.com/cesarghali)
-- [iamqizhao](https://github.com/iamqizhao)
-- [jeanbza](https://github.com/jeanbza)
-- [jtattermusch](https://github.com/jtattermusch)
-- [lyuxuan](https://github.com/lyuxuan)
-- [makmukhi](https://github.com/makmukhi)
-- [matt-kwong](https://github.com/matt-kwong)
-- [menghanl](https://github.com/menghanl)
-- [nicolasnoble](https://github.com/nicolasnoble)
-- [srini100](https://github.com/srini100)
-- [yongni](https://github.com/yongni)
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
deleted file mode 100644
index be38384ff..000000000
--- a/vendor/google.golang.org/grpc/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-all: vet test testrace
-
-build:
- go build google.golang.org/grpc/...
-
-clean:
- go clean -i google.golang.org/grpc/...
-
-deps:
- GO111MODULE=on go get -d -v google.golang.org/grpc/...
-
-proto:
- @ if ! which protoc > /dev/null; then \
- echo "error: protoc not installed" >&2; \
- exit 1; \
- fi
- go generate google.golang.org/grpc/...
-
-test:
- go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-
-testsubmodule:
- cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
- cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
-
-testrace:
- go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-
-testdeps:
- GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
-
-vet: vetdeps
- ./scripts/vet.sh
-
-vetdeps:
- ./scripts/vet.sh -install
-
-.PHONY: \
- all \
- build \
- clean \
- deps \
- proto \
- test \
- testsubmodule \
- testrace \
- testdeps \
- vet \
- vetdeps
diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt
deleted file mode 100644
index 530197749..000000000
--- a/vendor/google.golang.org/grpc/NOTICE.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2014 gRPC authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
deleted file mode 100644
index b572707c6..000000000
--- a/vendor/google.golang.org/grpc/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-# gRPC-Go
-
-[API Reference][API]
-[Go Report Card](https://goreportcard.com/report/github.com/grpc/grpc-go)
-[codecov](https://codecov.io/gh/grpc/grpc-go)
-
-The [Go][] implementation of [gRPC][]: A high performance, open source, general
-RPC framework that puts mobile and HTTP/2 first. For more information see the
-[Go gRPC docs][], or jump directly into the [quick start][].
-
-## Prerequisites
-
-- **[Go][]**: any one of the **two latest major** [releases][go-releases].
-
-## Installation
-
-Simply add the following import to your code, and then `go [build|run|test]`
-will automatically fetch the necessary dependencies:
-
-
-```go
-import "google.golang.org/grpc"
-```
-
-> **Note:** If you are trying to access `grpc-go` from **China**, see the
-> [FAQ](#FAQ) below.
-
-## Learn more
-
-- [Go gRPC docs][], which include a [quick start][] and [API
- reference][API] among other resources
-- [Low-level technical docs](Documentation) from this repository
-- [Performance benchmark][]
-- [Examples](examples)
-
-## FAQ
-
-### I/O Timeout Errors
-
-The `golang.org` domain may be blocked in some countries. `go get` usually
-produces an error like the following when this happens:
-
-```console
-$ go get -u google.golang.org/grpc
-package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
-```
-
-To build Go code, there are several options:
-
-- Set up a VPN and access google.golang.org through that.
-
-- With Go module support: it is possible to use the `replace` feature of `go
- mod` to create aliases for golang.org packages. In your project's directory:
-
- ```sh
- go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
- go mod tidy
- go mod vendor
- go build -mod=vendor
- ```
-
- Note that this will need to be done for all transitive dependencies hosted on
- golang.org as well. For details, refer to [golang/go issue
- #28652](https://github.com/golang/go/issues/28652).
-
-### Compiling error, undefined: grpc.SupportPackageIsVersion
-
-Please update to the latest version of gRPC-Go using
-`go get google.golang.org/grpc`.
-
-### How to turn on logging
-
-The default logger is controlled by environment variables. Turn everything on
-like this:
-
-```console
-$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
-$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
-```
-
-### The RPC failed with error `"code = Unavailable desc = transport is closing"`
-
-This error means the connection the RPC is using was closed, and there are many
-possible reasons, including:
- 1. mis-configured transport credentials, causing the connection handshake to fail
- 1. the byte stream being disrupted, possibly by a proxy in between
- 1. server shutdown
- 1. keepalive parameters causing connection shutdown, for example if you have
- configured your server to terminate connections regularly to [trigger DNS
- lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
- If this is the case, you may want to increase your
- [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
- to allow longer RPC calls to finish.
-
-It can be tricky to debug this because the error happens on the client side but
-the root cause of the connection being closed is on the server side. Turn on
-logging on __both client and server__, and see if there are any transport
-errors.
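
For the keepalive case in item 4 above, a minimal sketch of the server-side parameters involved; the durations are illustrative only:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Recycle connections periodically (forcing clients to re-resolve),
	// but give in-flight RPCs a grace period to finish first.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      30 * time.Minute,
		MaxConnectionAgeGrace: 5 * time.Minute,
	}))
	defer srv.Stop()
}
```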
-
-[API]: https://pkg.go.dev/google.golang.org/grpc
-[Go]: https://golang.org
-[Go module]: https://github.com/golang/go/wiki/Modules
-[gRPC]: https://grpc.io
-[Go gRPC docs]: https://grpc.io/docs/languages/go
-[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
-[quick start]: https://grpc.io/docs/languages/go/quickstart
-[go-releases]: https://golang.org/doc/devel/release.html
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
deleted file mode 100644
index abab27937..000000000
--- a/vendor/google.golang.org/grpc/SECURITY.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Security Policy
-
-For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
deleted file mode 100644
index 52d530d7a..000000000
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package attributes defines a generic key/value store used in various gRPC
-// components.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package attributes
-
-import (
- "fmt"
- "strings"
-)
-
-// Attributes is an immutable struct for storing and retrieving generic
-// key/value pairs. Keys must be hashable, and users should define their own
-// types for keys. Values should not be modified after they are added to an
-// Attributes or if they were received from one. If values implement 'Equal(o
-// any) bool', it will be called by (*Attributes).Equal to determine whether
-// two values with the same key should be considered equal.
-type Attributes struct {
- m map[any]any
-}
-
-// New returns a new Attributes containing the key/value pair.
-func New(key, value any) *Attributes {
- return &Attributes{m: map[any]any{key: value}}
-}
-
-// WithValue returns a new Attributes containing the previous keys and values
-// and the new key/value pair. If the same key appears multiple times, the
-// last value overwrites all previous values for that key. To remove an
-// existing key, use a nil value. value should not be modified later.
-func (a *Attributes) WithValue(key, value any) *Attributes {
- if a == nil {
- return New(key, value)
- }
- n := &Attributes{m: make(map[any]any, len(a.m)+1)}
- for k, v := range a.m {
- n.m[k] = v
- }
- n.m[key] = value
- return n
-}
-
-// Value returns the value associated with these attributes for key, or nil if
-// no value is associated with key. The returned value should not be modified.
-func (a *Attributes) Value(key any) any {
- if a == nil {
- return nil
- }
- return a.m[key]
-}
-
-// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
-// implemented for a value in the attributes, it is called to determine if the
-// value matches the one stored in the other attributes. If Equal is not
-// implemented, standard equality is used to determine if the two values are
-// equal. Note that some types (e.g. maps) aren't comparable by default, so
-// they must be wrapped in a struct, or in an alias type, with Equal defined.
-func (a *Attributes) Equal(o *Attributes) bool {
- if a == nil && o == nil {
- return true
- }
- if a == nil || o == nil {
- return false
- }
- if len(a.m) != len(o.m) {
- return false
- }
- for k, v := range a.m {
- ov, ok := o.m[k]
- if !ok {
- // o missing element of a
- return false
- }
- if eq, ok := v.(interface{ Equal(o any) bool }); ok {
- if !eq.Equal(ov) {
- return false
- }
- } else if v != ov {
- // Fall back to a standard equality check if Equal is unimplemented.
- return false
- }
- }
- return true
-}
-
-// String prints the attribute map. If any keys or values in the map
-// implement fmt.Stringer, that method is called and its result is appended.
-func (a *Attributes) String() string {
- var sb strings.Builder
- sb.WriteString("{")
- first := true
- for k, v := range a.m {
- if !first {
- sb.WriteString(", ")
- }
- sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
- first = false
- }
- sb.WriteString("}")
- return sb.String()
-}
-
-func str(x any) (s string) {
- if v, ok := x.(fmt.Stringer); ok {
- return fmt.Sprint(v)
- } else if v, ok := x.(string); ok {
- return v
- }
- return fmt.Sprintf("<%p>", x)
-}
-
-// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
-// the Attributes correctly when printing (via pretty.JSON) structs containing
-// Attributes as fields.
-//
-// It is impossible to unmarshal attributes from a JSON representation, and
-// this method is meant only for debugging purposes.
-func (a *Attributes) MarshalJSON() ([]byte, error) {
- return []byte(a.String()), nil
-}
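
The copy-on-write behavior documented above is easiest to see in a small sketch; `regionKey` is an invented key type, following the package's advice that users define their own types for keys:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// regionKey is invented for this sketch; keys must be comparable.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east1")
	b := a.WithValue(regionKey{}, "eu-west1") // copies; a is unchanged

	fmt.Println(a.Value(regionKey{})) // us-east1
	fmt.Println(b.Value(regionKey{})) // eu-west1
	fmt.Println(a.Equal(b))           // false: values differ for regionKey
}
```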
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
deleted file mode 100644
index 29475e31c..000000000
--- a/vendor/google.golang.org/grpc/backoff.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// See internal/backoff package for the backoff implementation. This file is
-// kept for the exported types and API backward compatibility.
-
-package grpc
-
-import (
- "time"
-
- "google.golang.org/grpc/backoff"
-)
-
-// DefaultBackoffConfig uses values specified for backoff in
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-//
-// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
-var DefaultBackoffConfig = BackoffConfig{
- MaxDelay: 120 * time.Second,
-}
-
-// BackoffConfig defines the parameters for the default gRPC backoff strategy.
-//
-// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
-type BackoffConfig struct {
- // MaxDelay is the upper bound of backoff delay.
- MaxDelay time.Duration
-}
-
-// ConnectParams defines the parameters for connecting and retrying. Users are
-// encouraged to use this instead of the BackoffConfig type defined above. See
-// here for more details:
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type ConnectParams struct {
- // Backoff specifies the configuration options for connection backoff.
- Backoff backoff.Config
- // MinConnectTimeout is the minimum amount of time we are willing to give a
- // connection to complete.
- MinConnectTimeout time.Duration
-}
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
deleted file mode 100644
index d7b40b7cb..000000000
--- a/vendor/google.golang.org/grpc/backoff/backoff.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package backoff provides configuration options for backoff.
-//
-// More details can be found at:
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-//
-// All APIs in this package are experimental.
-package backoff
-
-import "time"
-
-// Config defines the configuration options for backoff.
-type Config struct {
- // BaseDelay is the amount of time to backoff after the first failure.
- BaseDelay time.Duration
- // Multiplier is the factor with which to multiply backoffs after a
- // failed retry. Should ideally be greater than 1.
- Multiplier float64
- // Jitter is the factor with which backoffs are randomized.
- Jitter float64
- // MaxDelay is the upper bound of backoff delay.
- MaxDelay time.Duration
-}
-
-// DefaultConfig is a backoff configuration with the default values specified
-// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-//
-// This should be useful for callers who want to configure backoff with
-// non-default values only for a subset of the options.
-var DefaultConfig = Config{
- BaseDelay: 1.0 * time.Second,
- Multiplier: 1.6,
- Jitter: 0.2,
- MaxDelay: 120 * time.Second,
-}
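
A minimal sketch of plugging these options into a client via `ConnectParams`, overriding a single field of `DefaultConfig` as its comment suggests; the target address is a placeholder, and `grpc.NewClient` assumes a grpc-go version that provides it (older versions use `grpc.Dial`):

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cfg := backoff.DefaultConfig // BaseDelay 1s, Multiplier 1.6, Jitter 0.2
	cfg.MaxDelay = 30 * time.Second

	conn, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           cfg,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```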
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
deleted file mode 100644
index c9b343c71..000000000
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package balancer defines APIs for load balancing in gRPC.
-// All APIs in this package are experimental.
-package balancer
-
-import (
- "context"
- "encoding/json"
- "errors"
- "net"
- "strings"
-
- "google.golang.org/grpc/channelz"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
- estats "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-var (
- // m is a map from name to balancer builder.
- m = make(map[string]Builder)
-
- logger = grpclog.Component("balancer")
-)
-
-// Register registers the balancer builder to the balancer map. b.Name
-// (lowercased) will be used as the name registered with this builder. If the
-// Builder implements ConfigParser, ParseConfig will be called when new service
-// configs are received by the resolver, and the result will be provided to the
-// Balancer in UpdateClientConnState.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Balancers are
-// registered with the same name, the one registered last will take effect.
-func Register(b Builder) {
- name := strings.ToLower(b.Name())
- if name != b.Name() {
- // TODO: Skip the use of strings.ToLower() to index the map after v1.59
- // is released to switch to case sensitive balancer registry. Also,
- // remove this warning and update the docstrings for Register and Get.
- logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
- }
- m[name] = b
-}
-
-// unregisterForTesting deletes the balancer with the given name from the
-// balancer map.
-//
-// This function is not thread-safe.
-func unregisterForTesting(name string) {
- delete(m, name)
-}
-
-func init() {
- internal.BalancerUnregister = unregisterForTesting
- internal.ConnectedAddress = connectedAddress
- internal.SetConnectedAddress = setConnectedAddress
-}
-
-// Get returns the balancer builder registered with the given name.
-// Note that the comparison is done in a case-insensitive fashion.
-// If no builder is registered with the name, nil will be returned.
-func Get(name string) Builder {
- if strings.ToLower(name) != name {
- // TODO: Skip the use of strings.ToLower() to index the map after v1.59
- // is released to switch to case sensitive balancer registry. Also,
- // remove this warning and update the docstrings for Register and Get.
- logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
- }
- if b, ok := m[strings.ToLower(name)]; ok {
- return b
- }
- return nil
-}
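
As a usage illustration of the registry deleted above, here is a minimal sketch of a builder registered at init() time. The package, builder, and policy names are hypothetical; base.NewBalancerBuilder comes from the base package deleted further down in this diff.

package examplelb

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type examplePickerBuilder struct{}

func (examplePickerBuilder) Build(base.PickerBuildInfo) balancer.Picker {
	// Placeholder: a real policy would build a picker from the ready
	// SubConns in the PickerBuildInfo.
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

func init() {
	// Stored under "example-lb"; balancer.Get("example-lb") returns this
	// builder, and the lookup is case-insensitive today.
	balancer.Register(base.NewBalancerBuilder("example-lb", examplePickerBuilder{}, base.Config{}))
}
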
-
-// NewSubConnOptions contains options to create new SubConn.
-type NewSubConnOptions struct {
- // CredsBundle is the credentials bundle that will be used in the created
- // SubConn. If it's nil, the original creds from grpc DialOptions will be
- // used.
- //
- // Deprecated: Use the Attributes field in resolver.Address to pass
- // arbitrary data to the credential handshaker.
- CredsBundle credentials.Bundle
- // HealthCheckEnabled indicates whether the health check service should be
- // enabled on this SubConn.
- HealthCheckEnabled bool
- // StateListener is called when the state of the subconn changes. If nil,
- // Balancer.UpdateSubConnState will be called instead. Will never be
- // invoked until after Connect() is called on the SubConn created with
- // these options.
- StateListener func(SubConnState)
-}
-
-// State contains the balancer's state relevant to the gRPC ClientConn.
-type State struct {
- // State contains the connectivity state of the balancer, which is used to
- // determine the state of the ClientConn.
- ConnectivityState connectivity.State
- // Picker is used to choose connections (SubConns) for RPCs.
- Picker Picker
-}
-
-// ClientConn represents a gRPC ClientConn.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
-//
-// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
-// by custom load balancing policies. Users should not need their own complete
-// implementation of this interface -- they should always delegate to a
-// ClientConn passed to Builder.Build() by embedding it in their
-// implementations. An embedded ClientConn must never be nil, or runtime panics
-// will occur.
-type ClientConn interface {
- // NewSubConn is called by the balancer to create a new SubConn.
- // It doesn't block or wait for connections to be established.
- // The behavior of the SubConn can be controlled by the options.
- //
- // Deprecated: please be aware that in a future version, SubConns will only
- // support one address per SubConn.
- NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
- // RemoveSubConn removes the SubConn from ClientConn.
- // The SubConn will be shutdown.
- //
- // Deprecated: use SubConn.Shutdown instead.
- RemoveSubConn(SubConn)
- // UpdateAddresses updates the addresses used in the passed in SubConn.
- // gRPC checks if the currently connected address is still in the new list.
- // If so, the connection will be kept. Otherwise, the connection will be
- // gracefully closed, and a new connection will be created.
- //
- // This may trigger a state transition for the SubConn.
- //
- // Deprecated: this method will be removed. Create new SubConns for new
- // addresses instead.
- UpdateAddresses(SubConn, []resolver.Address)
-
- // UpdateState notifies gRPC that the balancer's internal state has
- // changed.
- //
- // gRPC will update the connectivity state of the ClientConn, and will call
- // Pick on the new Picker to pick new SubConns.
- UpdateState(State)
-
- // ResolveNow is called by the balancer to ask gRPC to perform name resolution.
- ResolveNow(resolver.ResolveNowOptions)
-
- // Target returns the dial target for this ClientConn.
- //
- // Deprecated: Use the Target field in the BuildOptions instead.
- Target() string
-
- // MetricsRecorder provides the metrics recorder that balancers can use to
- // record metrics. Balancer implementations which do not register metrics
- // with the metrics registry and record on them can ignore this method. The
- // returned MetricsRecorder is guaranteed to never be nil.
- MetricsRecorder() estats.MetricsRecorder
-
- // EnforceClientConnEmbedding is included to force implementers to embed
- // another implementation of this interface, allowing gRPC to add methods
- // without breaking users.
- internal.EnforceClientConnEmbedding
-}
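
The embedding requirement in the NOTICE above is easiest to see with a small sketch (hypothetical wrapper, e.g. for tests or an intercepting policy): override the one method you care about and let the embedded ClientConn supply everything else, including EnforceClientConnEmbedding.

package ccwrap

import "google.golang.org/grpc/balancer"

type interceptCC struct {
	balancer.ClientConn // must be non-nil; supplies all other methods

	onState func(balancer.State)
}

// UpdateState observes the balancer's state updates, then forwards them to
// the real ClientConn.
func (c *interceptCC) UpdateState(s balancer.State) {
	c.onState(s)
	c.ClientConn.UpdateState(s)
}
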
-
-// BuildOptions contains additional information for Build.
-type BuildOptions struct {
- // DialCreds is the transport credentials to use when communicating with a
- // remote load balancer server. Balancer implementations which do not
- // communicate with a remote load balancer server can ignore this field.
- DialCreds credentials.TransportCredentials
- // CredsBundle is the credentials bundle to use when communicating with a
- // remote load balancer server. Balancer implementations which do not
- // communicate with a remote load balancer server can ignore this field.
- CredsBundle credentials.Bundle
- // Dialer is the custom dialer to use when communicating with a remote load
- // balancer server. Balancer implementations which do not communicate with a
- // remote load balancer server can ignore this field.
- Dialer func(context.Context, string) (net.Conn, error)
- // Authority is the server name to use as part of the authentication
- // handshake when communicating with a remote load balancer server. Balancer
- // implementations which do not communicate with a remote load balancer
- // server can ignore this field.
- Authority string
- // ChannelzParent is the parent ClientConn's channelz channel.
- ChannelzParent channelz.Identifier
- // CustomUserAgent is the custom user agent set on the parent ClientConn.
- // The balancer should set the same custom user agent if it creates a
- // ClientConn.
- CustomUserAgent string
- // Target contains the parsed address info of the dial target. It is the
- // same resolver.Target as passed to the resolver. See the documentation for
- // the resolver.Target type for details about what it contains.
- Target resolver.Target
-}
-
-// Builder creates a balancer.
-type Builder interface {
- // Build creates a new balancer with the ClientConn.
- Build(cc ClientConn, opts BuildOptions) Balancer
- // Name returns the name of balancers built by this builder.
- // It will be used to pick balancers (for example in service config).
- Name() string
-}
-
-// ConfigParser parses load balancer configs.
-type ConfigParser interface {
- // ParseConfig parses the JSON load balancer config provided into an
- // internal form or returns an error if the config is invalid. For future
- // compatibility reasons, unknown fields in the config should be ignored.
- ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
-}
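
A sketch of what a ConfigParser implementation typically looks like (type and field names are hypothetical); json.Unmarshal ignores unknown fields by default, which matches the forward-compatibility note above.

package examplelb

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/serviceconfig"
)

type lbConfig struct {
	serviceconfig.LoadBalancingConfig `json:"-"`

	// ChildWeight is an illustrative knob, not a real gRPC option.
	ChildWeight int `json:"childWeight"`
}

type builder struct{} // Build and Name elided for brevity

func (builder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
	var cfg lbConfig
	if err := json.Unmarshal(js, &cfg); err != nil {
		return nil, fmt.Errorf("examplelb: unable to parse LB config %s: %v", string(js), err)
	}
	return cfg, nil
}
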
-
-// PickInfo contains additional information for the Pick operation.
-type PickInfo struct {
- // FullMethodName is the method name that NewClientStream() is called
- // with. The canonical format is /service/Method.
- FullMethodName string
- // Ctx is the RPC's context, and may contain relevant RPC-level information
- // like the outgoing header metadata.
- Ctx context.Context
-}
-
-// DoneInfo contains additional information for the Done callback.
-type DoneInfo struct {
- // Err is the rpc error the RPC finished with. It could be nil.
- Err error
- // Trailer contains the metadata from the RPC's trailer, if present.
- Trailer metadata.MD
- // BytesSent indicates if any bytes have been sent to the server.
- BytesSent bool
- // BytesReceived indicates if any byte has been received from the server.
- BytesReceived bool
- // ServerLoad is the load received from server. It's usually sent as part of
- // trailing metadata.
- //
- // The only supported type now is *orca_v3.LoadReport.
- ServerLoad any
-}
-
-var (
- // ErrNoSubConnAvailable indicates no SubConn is available for pick().
- // gRPC will block the RPC until a new picker is available via UpdateState().
- ErrNoSubConnAvailable = errors.New("no SubConn is available")
- // ErrTransientFailure indicates all SubConns are in TransientFailure.
- // WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
- //
- // Deprecated: return an appropriate error based on the last resolution or
- // connection attempt instead. The behavior is the same for any non-gRPC
- // status error.
- ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
-)
-
-// PickResult contains information related to a connection chosen for an RPC.
-type PickResult struct {
- // SubConn is the connection to use for this pick, if its state is Ready.
- // If the state is not Ready, gRPC will block the RPC until a new Picker is
- // provided by the balancer (using ClientConn.UpdateState). The SubConn
- // must be one returned by ClientConn.NewSubConn.
- SubConn SubConn
-
- // Done is called when the RPC is completed. If the SubConn is not ready,
- // this will be called with a nil parameter. If the SubConn is not a valid
- // type, Done may not be called. May be nil if the balancer does not wish
- // to be notified when the RPC completes.
- Done func(DoneInfo)
-
- // Metadata provides a way for LB policies to inject arbitrary per-call
- // metadata. Any metadata returned here will be merged with existing
- // metadata added by the client application.
- //
- // LB policies with child policies are responsible for propagating metadata
- // injected by their children to the ClientConn, as part of Pick().
- Metadata metadata.MD
-}
-
-// TransientFailureError returns e. It exists for backward compatibility and
-// will be deleted soon.
-//
-// Deprecated: no longer necessary, picker errors are treated this way by
-// default.
-func TransientFailureError(e error) error { return e }
-
-// Picker is used by gRPC to pick a SubConn to send an RPC.
-// Balancer is expected to generate a new picker from its snapshot every time its
-// internal state has changed.
-//
-// The pickers used by gRPC can be updated by ClientConn.UpdateState().
-type Picker interface {
- // Pick returns the connection to use for this RPC and related information.
- //
- // Pick should not block. If the balancer needs to do I/O or any blocking
- // or time-consuming work to service this call, it should return
- // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
- // the Picker is updated (using ClientConn.UpdateState).
- //
- // If an error is returned:
- //
- // - If the error is ErrNoSubConnAvailable, gRPC will block until a new
- // Picker is provided by the balancer (using ClientConn.UpdateState).
- //
- // - If the error is a status error (implemented by the grpc/status
- // package), gRPC will terminate the RPC with the code and message
- // provided.
- //
- // - For all other errors, wait for ready RPCs will wait, but non-wait for
- // ready RPCs will be terminated with this error's Error() string and
- // status code Unavailable.
- Pick(info PickInfo) (PickResult, error)
-}
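
To make the non-blocking contract concrete, here is a sketch of a round-robin picker over a fixed set of ready SubConns (simplified; the real round_robin policy works along these lines).

package rrsketch

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
)

type rrPicker struct {
	scs  []balancer.SubConn // ready SubConns, fixed at picker construction
	next uint32
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.scs) == 0 {
		// Tell gRPC to block the RPC until a new picker arrives.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	n := atomic.AddUint32(&p.next, 1)
	return balancer.PickResult{SubConn: p.scs[(n-1)%uint32(len(p.scs))]}, nil
}
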
-
-// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
-// the connectivity states.
-//
-// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
-//
-// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
-// guaranteed to be called synchronously from the same goroutine. There's no
-// guarantee on picker.Pick, it may be called anytime.
-type Balancer interface {
- // UpdateClientConnState is called by gRPC when the state of the ClientConn
- // changes. If the error returned is ErrBadResolverState, the ClientConn
- // will begin calling ResolveNow on the active name resolver with
- // exponential backoff until a subsequent call to UpdateClientConnState
- // returns a nil error. Any other errors are currently ignored.
- UpdateClientConnState(ClientConnState) error
- // ResolverError is called by gRPC when the name resolver reports an error.
- ResolverError(error)
- // UpdateSubConnState is called by gRPC when the state of a SubConn
- // changes.
- //
- // Deprecated: Use NewSubConnOptions.StateListener when creating the
- // SubConn instead.
- UpdateSubConnState(SubConn, SubConnState)
- // Close closes the balancer. The balancer is not currently required to
- // call SubConn.Shutdown for its existing SubConns; however, this will be
- // required in a future release, so it is recommended.
- Close()
-}
-
-// ExitIdler is an optional interface for balancers to implement. If
-// implemented, ExitIdle will be called when ClientConn.Connect is called, if
-// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause
-// all SubConns to connect.
-//
-// Notice: it will be required for all balancers to implement this in a future
-// release.
-type ExitIdler interface {
- // ExitIdle instructs the LB policy to reconnect to backends / exit the
- // IDLE state, if appropriate and possible. Note that SubConns that enter
- // the IDLE state will not reconnect until SubConn.Connect is called.
- ExitIdle()
-}
-
-// ClientConnState describes the state of a ClientConn relevant to the
-// balancer.
-type ClientConnState struct {
- ResolverState resolver.State
- // The parsed load balancing configuration returned by the builder's
- // ParseConfig method, if implemented.
- BalancerConfig serviceconfig.LoadBalancingConfig
-}
-
-// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
-// problem with the provided name resolver data.
-var ErrBadResolverState = errors.New("bad resolver state")
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
deleted file mode 100644
index d5ed172ae..000000000
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package base
-
-import (
- "errors"
- "fmt"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-var logger = grpclog.Component("balancer")
-
-type baseBuilder struct {
- name string
- pickerBuilder PickerBuilder
- config Config
-}
-
-func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
- bal := &baseBalancer{
- cc: cc,
- pickerBuilder: bb.pickerBuilder,
-
- subConns: resolver.NewAddressMap(),
- scStates: make(map[balancer.SubConn]connectivity.State),
- csEvltr: &balancer.ConnectivityStateEvaluator{},
- config: bb.config,
- state: connectivity.Connecting,
- }
- // Initialize picker to a picker that always returns
- // ErrNoSubConnAvailable, because when the state of a SubConn changes, we
- // may call UpdateState with this picker.
- bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
- return bal
-}
-
-func (bb *baseBuilder) Name() string {
- return bb.name
-}
-
-type baseBalancer struct {
- cc balancer.ClientConn
- pickerBuilder PickerBuilder
-
- csEvltr *balancer.ConnectivityStateEvaluator
- state connectivity.State
-
- subConns *resolver.AddressMap
- scStates map[balancer.SubConn]connectivity.State
- picker balancer.Picker
- config Config
-
- resolverErr error // the last error reported by the resolver; cleared on successful resolution
- connErr error // the last connection error; cleared upon leaving TransientFailure
-}
-
-func (b *baseBalancer) ResolverError(err error) {
- b.resolverErr = err
- if b.subConns.Len() == 0 {
- b.state = connectivity.TransientFailure
- }
-
- if b.state != connectivity.TransientFailure {
- // The picker will not change since the balancer does not currently
- // report an error.
- return
- }
- b.regeneratePicker()
- b.cc.UpdateState(balancer.State{
- ConnectivityState: b.state,
- Picker: b.picker,
- })
-}
-
-func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
- // TODO: handle s.ResolverState.ServiceConfig?
- if logger.V(2) {
- logger.Info("base.baseBalancer: got new ClientConn state: ", s)
- }
- // Successful resolution; clear resolver error and ensure we return nil.
- b.resolverErr = nil
- // addrsSet is the set converted from addrs; it's used for quick lookup of an address.
- addrsSet := resolver.NewAddressMap()
- for _, a := range s.ResolverState.Addresses {
- addrsSet.Set(a, nil)
- if _, ok := b.subConns.Get(a); !ok {
- // a is a new address (not existing in b.subConns).
- var sc balancer.SubConn
- opts := balancer.NewSubConnOptions{
- HealthCheckEnabled: b.config.HealthCheck,
- StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
- }
- sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
- if err != nil {
- logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
- continue
- }
- b.subConns.Set(a, sc)
- b.scStates[sc] = connectivity.Idle
- b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
- sc.Connect()
- }
- }
- for _, a := range b.subConns.Keys() {
- sci, _ := b.subConns.Get(a)
- sc := sci.(balancer.SubConn)
- // a was removed by resolver.
- if _, ok := addrsSet.Get(a); !ok {
- sc.Shutdown()
- b.subConns.Delete(a)
- // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
- // The entry will be deleted in updateSubConnState.
- }
- }
- // If the resolver state contains no addresses, return an error so the
- // ClientConn will trigger re-resolution. Also record this as a resolver
- // error, so that when the overall state turns transient failure, the
- // error message will include the zero-address information.
- if len(s.ResolverState.Addresses) == 0 {
- b.ResolverError(errors.New("produced zero addresses"))
- return balancer.ErrBadResolverState
- }
-
- b.regeneratePicker()
- b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
- return nil
-}
-
-// mergeErrors builds an error from the last connection error and the last
-// resolver error. Must only be called if b.state is TransientFailure.
-func (b *baseBalancer) mergeErrors() error {
- // connErr must always be non-nil unless there are no SubConns, in which
- // case resolverErr must be non-nil.
- if b.connErr == nil {
- return fmt.Errorf("last resolver error: %v", b.resolverErr)
- }
- if b.resolverErr == nil {
- return fmt.Errorf("last connection error: %v", b.connErr)
- }
- return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
-}
-
-// regeneratePicker takes a snapshot of the balancer, and generates a picker
-// from it. The picker is
-// - errPicker if the balancer is in TransientFailure,
-// - built by the pickerBuilder with all READY SubConns otherwise.
-func (b *baseBalancer) regeneratePicker() {
- if b.state == connectivity.TransientFailure {
- b.picker = NewErrPicker(b.mergeErrors())
- return
- }
- readySCs := make(map[balancer.SubConn]SubConnInfo)
-
- // Select all ready SCs from the full subConn map.
- for _, addr := range b.subConns.Keys() {
- sci, _ := b.subConns.Get(addr)
- sc := sci.(balancer.SubConn)
- if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
- readySCs[sc] = SubConnInfo{Address: addr}
- }
- }
- b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
-}
-
-// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
-func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
- logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
-}
-
-func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
- s := state.ConnectivityState
- if logger.V(2) {
- logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
- }
- oldS, ok := b.scStates[sc]
- if !ok {
- if logger.V(2) {
- logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
- }
- return
- }
- if oldS == connectivity.TransientFailure &&
- (s == connectivity.Connecting || s == connectivity.Idle) {
- // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
- // CONNECTING transitions to prevent the aggregated state from being
- // always CONNECTING when many backends exist but are all down.
- if s == connectivity.Idle {
- sc.Connect()
- }
- return
- }
- b.scStates[sc] = s
- switch s {
- case connectivity.Idle:
- sc.Connect()
- case connectivity.Shutdown:
- // When an address was removed by resolver, b called Shutdown but kept
- // the sc's state in scStates. Remove state for this sc here.
- delete(b.scStates, sc)
- case connectivity.TransientFailure:
- // Save error to be reported via picker.
- b.connErr = state.ConnectionError
- }
-
- b.state = b.csEvltr.RecordTransition(oldS, s)
-
- // Regenerate picker when one of the following happens:
- // - this sc entered or left ready
- // - the aggregated state of balancer is TransientFailure
- // (may need to update error message)
- if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
- b.state == connectivity.TransientFailure {
- b.regeneratePicker()
- }
- b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
-}
-
-// Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call Shutdown for the SubConns.
-func (b *baseBalancer) Close() {
-}
-
-// ExitIdle is a nop because the base balancer attempts to stay connected to
-// all SubConns at all times.
-func (b *baseBalancer) ExitIdle() {
-}
-
-// NewErrPicker returns a Picker that always returns err on Pick().
-func NewErrPicker(err error) balancer.Picker {
- return &errPicker{err: err}
-}
-
-// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
-//
-// Deprecated: use NewErrPicker instead.
-var NewErrPickerV2 = NewErrPicker
-
-type errPicker struct {
- err error // Pick() always returns this err.
-}
-
-func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- return balancer.PickResult{}, p.err
-}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
deleted file mode 100644
index e31d76e33..000000000
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package base defines a balancer base that can be used to build balancers with
-// different picking algorithms.
-//
-// The base balancer creates a new SubConn for each resolved address. The
-// provided picker will only be notified about READY SubConns.
-//
-// This package is the base of the round_robin balancer; its purpose is to be
-// used to build round_robin-like balancers with complex picking algorithms.
-// Balancers with more complicated logic should instead implement a balancer
-// builder from scratch.
-//
-// All APIs in this package are experimental.
-package base
-
-import (
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/resolver"
-)
-
-// PickerBuilder creates balancer.Picker.
-type PickerBuilder interface {
- // Build returns a picker that will be used by gRPC to pick a SubConn.
- Build(info PickerBuildInfo) balancer.Picker
-}
-
-// PickerBuildInfo contains information needed by the picker builder to
-// construct a picker.
-type PickerBuildInfo struct {
- // ReadySCs is a map from all ready SubConns to the Addresses used to
- // create them.
- ReadySCs map[balancer.SubConn]SubConnInfo
-}
-
-// SubConnInfo contains information about a SubConn created by the base
-// balancer.
-type SubConnInfo struct {
- Address resolver.Address // the address used to create this SubConn
-}
-
-// Config contains the config info about the base balancer builder.
-type Config struct {
- // HealthCheck indicates whether health checking should be enabled for this specific balancer.
- HealthCheck bool
-}
-
-// NewBalancerBuilder returns a base balancer builder configured by the provided config.
-func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
- return &baseBuilder{
- name: name,
- pickerBuilder: pb,
- config: config,
- }
-}
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
deleted file mode 100644
index c33413581..000000000
--- a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- *
- * Copyright 2022 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package balancer
-
-import "google.golang.org/grpc/connectivity"
-
-// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-//
-// It's not thread safe.
-type ConnectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transient failure state.
- numIdle uint64 // Number of addrConns in idle state.
-}
-
-// RecordTransition records a state change happening in a SubConn and, based
-// on that, evaluates what the aggregated state should be.
-//
-// - If at least one SubConn in Ready, the aggregated state is Ready;
-// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-// - Else if at least one SubConn is Idle, the aggregated state is Idle;
-// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is TransientFailure.
-//
-// Shutdown is not considered.
-func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
- // Update counters.
- for idx, state := range []connectivity.State{oldState, newState} {
- updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
- switch state {
- case connectivity.Ready:
- cse.numReady += updateVal
- case connectivity.Connecting:
- cse.numConnecting += updateVal
- case connectivity.TransientFailure:
- cse.numTransientFailure += updateVal
- case connectivity.Idle:
- cse.numIdle += updateVal
- }
- }
- return cse.CurrentState()
-}
-
-// CurrentState returns the current aggregate conn state by evaluating the counters.
-func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
- // Evaluate.
- if cse.numReady > 0 {
- return connectivity.Ready
- }
- if cse.numConnecting > 0 {
- return connectivity.Connecting
- }
- if cse.numIdle > 0 {
- return connectivity.Idle
- }
- return connectivity.TransientFailure
-}
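
A short worked example of these rules (the Shutdown-to-Idle transition is how baseBalancer above seeds a brand-new SubConn; note that updateVal in RecordTransition relies on uint64 wraparound, so idx 0 effectively subtracts one from the old state's counter):

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}
	// Two new SubConns; Shutdown is not counted, so each adds one Idle.
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	// One starts connecting: Connecting outranks Idle.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING
	// It becomes ready: Ready outranks everything.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY
}
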
diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
deleted file mode 100644
index 421c4fecc..000000000
--- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package endpointsharding implements a load balancing policy that manages
-// homogeneous child policies each owning a single endpoint.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package endpointsharding
-
-import (
- "errors"
- rand "math/rand/v2"
- "sync"
- "sync/atomic"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/base"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/resolver"
-)
-
-// ChildState is the balancer state of a child along with the endpoint which
-// identifies the child balancer.
-type ChildState struct {
- Endpoint resolver.Endpoint
- State balancer.State
-
- // Balancer exposes only the ExitIdler interface of the child LB policy.
- // Other methods of the child policy are called only by endpointsharding.
- Balancer balancer.ExitIdler
-}
-
-// Options are the options to configure the behaviour of the
-// endpointsharding balancer.
-type Options struct {
- // DisableAutoReconnect allows the balancer to keep child balancers in the
- // IDLE state until they are explicitly triggered to exit using the
- // ChildState obtained from the endpointsharding picker. When set to false,
- // the endpointsharding balancer will automatically call ExitIdle on child
- // connections that report IDLE.
- DisableAutoReconnect bool
-}
-
-// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same
-// type as the balancer.Builder.Build method.
-type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer
-
-// NewBalancer returns a load balancing policy that manages homogeneous child
-// policies each owning a single endpoint. The endpointsharding balancer
-// forwards the LoadBalancingConfig in ClientConn state updates to its children.
-func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer {
- es := &endpointSharding{
- cc: cc,
- bOpts: opts,
- esOpts: esOpts,
- childBuilder: childBuilder,
- }
- es.children.Store(resolver.NewEndpointMap())
- return es
-}
-
-// endpointSharding is a balancer that wraps child balancers. It creates a child
-// balancer with child config for every unique Endpoint received. It updates the
-// child states on any update from parent or child.
-type endpointSharding struct {
- cc balancer.ClientConn
- bOpts balancer.BuildOptions
- esOpts Options
- childBuilder ChildBuilderFunc
-
- // childMu synchronizes calls to any single child. It must be held for all
- // calls into a child. To avoid deadlocks, do not acquire childMu while
- // holding mu.
- childMu sync.Mutex
- children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper
-
- // inhibitChildUpdates is set during UpdateClientConnState/ResolverError
- // calls (each call into a child will produce an update; we only want a
- // single update).
- inhibitChildUpdates atomic.Bool
-
- // mu synchronizes access to the state stored in balancerWrappers in the
- // children field. mu must not be held during calls into a child since
- // synchronous calls back from the child may require taking mu, causing a
- // deadlock. To avoid deadlocks, do not acquire childMu while holding mu.
- mu sync.Mutex
-}
-
-// UpdateClientConnState creates a child for new endpoints and deletes children
-// for endpoints that are no longer present. It also updates all the children,
-// and sends a single synchronous update of the children's aggregated state at
-// the end of the UpdateClientConnState operation. If an endpoint has no
-// addresses, that endpoint is ignored. Otherwise, it returns the first error
-// found from a child, but fully processes the new update.
-func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error {
- es.childMu.Lock()
- defer es.childMu.Unlock()
-
- es.inhibitChildUpdates.Store(true)
- defer func() {
- es.inhibitChildUpdates.Store(false)
- es.updateState()
- }()
- var ret error
-
- children := es.children.Load()
- newChildren := resolver.NewEndpointMap()
-
- // Update/Create new children.
- for _, endpoint := range state.ResolverState.Endpoints {
- if _, ok := newChildren.Get(endpoint); ok {
- // Endpoint child was already created, continue to avoid duplicate
- // update.
- continue
- }
- var childBalancer *balancerWrapper
- if val, ok := children.Get(endpoint); ok {
- childBalancer = val.(*balancerWrapper)
- // Endpoint attributes may have changed, update the stored endpoint.
- es.mu.Lock()
- childBalancer.childState.Endpoint = endpoint
- es.mu.Unlock()
- } else {
- childBalancer = &balancerWrapper{
- childState: ChildState{Endpoint: endpoint},
- ClientConn: es.cc,
- es: es,
- }
- childBalancer.childState.Balancer = childBalancer
- childBalancer.child = es.childBuilder(childBalancer, es.bOpts)
- }
- newChildren.Set(endpoint, childBalancer)
- if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{
- BalancerConfig: state.BalancerConfig,
- ResolverState: resolver.State{
- Endpoints: []resolver.Endpoint{endpoint},
- Attributes: state.ResolverState.Attributes,
- },
- }); err != nil && ret == nil {
- // Return the first error found, and always commit full processing of
- // updating children. Callers that want to handle more specific errors
- // across all endpoints should perform those validations themselves;
- // this is a current limitation, kept for simplicity's sake.
- ret = err
- }
- }
- // Delete old children that are no longer present.
- for _, e := range children.Keys() {
- child, _ := children.Get(e)
- if _, ok := newChildren.Get(e); !ok {
- child.(*balancerWrapper).closeLocked()
- }
- }
- es.children.Store(newChildren)
- if newChildren.Len() == 0 {
- return balancer.ErrBadResolverState
- }
- return ret
-}
-
-// ResolverError forwards the resolver error to all of the endpointSharding's
-// children and sends a single synchronous update of the childStates at the end
-// of the ResolverError operation.
-func (es *endpointSharding) ResolverError(err error) {
- es.childMu.Lock()
- defer es.childMu.Unlock()
- es.inhibitChildUpdates.Store(true)
- defer func() {
- es.inhibitChildUpdates.Store(false)
- es.updateState()
- }()
- children := es.children.Load()
- for _, child := range children.Values() {
- child.(*balancerWrapper).resolverErrorLocked(err)
- }
-}
-
-func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
- // UpdateSubConnState is deprecated.
-}
-
-func (es *endpointSharding) Close() {
- es.childMu.Lock()
- defer es.childMu.Unlock()
- children := es.children.Load()
- for _, child := range children.Values() {
- child.(*balancerWrapper).closeLocked()
- }
-}
-
-// updateState updates this component's state. It sends the aggregated state,
-// and, if needed, a picker with round robin behavior that carries all the
-// child states.
-func (es *endpointSharding) updateState() {
- if es.inhibitChildUpdates.Load() {
- return
- }
- var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker
-
- es.mu.Lock()
- defer es.mu.Unlock()
-
- children := es.children.Load()
- childStates := make([]ChildState, 0, children.Len())
-
- for _, child := range children.Values() {
- bw := child.(*balancerWrapper)
- childState := bw.childState
- childStates = append(childStates, childState)
- childPicker := childState.State.Picker
- switch childState.State.ConnectivityState {
- case connectivity.Ready:
- readyPickers = append(readyPickers, childPicker)
- case connectivity.Connecting:
- connectingPickers = append(connectingPickers, childPicker)
- case connectivity.Idle:
- idlePickers = append(idlePickers, childPicker)
- case connectivity.TransientFailure:
- transientFailurePickers = append(transientFailurePickers, childPicker)
- // connectivity.Shutdown shouldn't appear.
- }
- }
-
- // Construct the round robin picker based on the aggregated state. Whatever
- // the aggregated state, use only the pickers for children currently in
- // that state.
- var aggState connectivity.State
- var pickers []balancer.Picker
- if len(readyPickers) >= 1 {
- aggState = connectivity.Ready
- pickers = readyPickers
- } else if len(connectingPickers) >= 1 {
- aggState = connectivity.Connecting
- pickers = connectingPickers
- } else if len(idlePickers) >= 1 {
- aggState = connectivity.Idle
- pickers = idlePickers
- } else if len(transientFailurePickers) >= 1 {
- aggState = connectivity.TransientFailure
- pickers = transientFailurePickers
- } else {
- // No children (resolver error before a valid update): report
- // TransientFailure with an always-erroring picker.
- aggState = connectivity.TransientFailure
- pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))}
- }
- p := &pickerWithChildStates{
- pickers: pickers,
- childStates: childStates,
- next: uint32(rand.IntN(len(pickers))),
- }
- es.cc.UpdateState(balancer.State{
- ConnectivityState: aggState,
- Picker: p,
- })
-}
-
-// pickerWithChildStates delegates to the pickers it holds in a round robin
-// fashion. It also contains the childStates of all the endpointSharding's
-// children.
-type pickerWithChildStates struct {
- pickers []balancer.Picker
- childStates []ChildState
- next uint32
-}
-
-func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
- nextIndex := atomic.AddUint32(&p.next, 1)
- picker := p.pickers[nextIndex%uint32(len(p.pickers))]
- return picker.Pick(info)
-}
-
-// ChildStatesFromPicker returns the state of all the children managed by the
-// endpoint sharding balancer that created this picker.
-func ChildStatesFromPicker(picker balancer.Picker) []ChildState {
- p, ok := picker.(*pickerWithChildStates)
- if !ok {
- return nil
- }
- return p.childStates
-}
-
-// balancerWrapper is a wrapper of a balancer. It identifies a child balancer
-// by its endpoint, and persists the most recent child balancer state.
-type balancerWrapper struct {
- // The following fields are initialized at build time and read-only after
- // that and therefore do not need to be guarded by a mutex.
-
- // child contains the wrapped balancer. Access its methods only through
- // methods on balancerWrapper to ensure proper synchronization.
- child balancer.Balancer
- balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns
-
- es *endpointSharding
-
- // Access to the following fields is guarded by es.mu.
-
- childState ChildState
- isClosed bool
-}
-
-func (bw *balancerWrapper) UpdateState(state balancer.State) {
- bw.es.mu.Lock()
- bw.childState.State = state
- bw.es.mu.Unlock()
- if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect {
- bw.ExitIdle()
- }
- bw.es.updateState()
-}
-
-// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
-// avoid deadlocks due to synchronous balancer state updates.
-func (bw *balancerWrapper) ExitIdle() {
- if ei, ok := bw.child.(balancer.ExitIdler); ok {
- go func() {
- bw.es.childMu.Lock()
- if !bw.isClosed {
- ei.ExitIdle()
- }
- bw.es.childMu.Unlock()
- }()
- }
-}
-
-// updateClientConnStateLocked delivers the ClientConnState to the child
-// balancer. Callers must hold the child mutex of the parent endpointsharding
-// balancer.
-func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error {
- return bw.child.UpdateClientConnState(ccs)
-}
-
-// closeLocked closes the child balancer. Callers must hold the child mutex of
-// the parent endpointsharding balancer.
-func (bw *balancerWrapper) closeLocked() {
- bw.child.Close()
- bw.isClosed = true
-}
-
-func (bw *balancerWrapper) resolverErrorLocked(err error) {
- bw.child.ResolverError(err)
-}
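
A sketch of how a parent policy can use this package (the wiring here is illustrative; pickfirstleaf is the package deleted further down in this diff). Per-endpoint state can later be read back with ChildStatesFromPicker from the pickers received via UpdateState.

package parentlb

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/endpointsharding"
	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
)

type bb struct{}

func (bb) Name() string { return "parent_example" }

func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	// One child per endpoint, each running pick_first_leaf; children that
	// report IDLE are reconnected automatically (DisableAutoReconnect unset).
	child := balancer.Get(pickfirstleaf.Name)
	return endpointsharding.NewBalancer(cc, opts, child.Build, endpointsharding.Options{})
}
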
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
deleted file mode 100644
index 4ecfa1c21..000000000
--- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package state declares grpclb types to be set by resolvers wishing to pass
-// information to grpclb via resolver.State Attributes.
-package state
-
-import (
- "google.golang.org/grpc/resolver"
-)
-
-// keyType is the key to use for storing State in Attributes.
-type keyType string
-
-const key = keyType("grpc.grpclb.state")
-
-// State contains gRPCLB-relevant data passed from the name resolver.
-type State struct {
- // BalancerAddresses contains the remote load balancer address(es). If
- // set, overrides any resolver-provided addresses with Type of GRPCLB.
- BalancerAddresses []resolver.Address
-}
-
-// Set returns a copy of the provided state with attributes containing s. s's
-// data should not be mutated after calling Set.
-func Set(state resolver.State, s *State) resolver.State {
- state.Attributes = state.Attributes.WithValue(key, s)
- return state
-}
-
-// Get returns the grpclb State in the resolver.State, or nil if not present.
-// The returned data should not be mutated.
-func Get(state resolver.State) *State {
- s, _ := state.Attributes.Value(key).(*State)
- return s
-}
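
A sketch of a resolver passing balancer addresses to grpclb through these helpers (the resolver package name and addresses are placeholders):

package myresolver

import (
	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

func buildState(backends, balancers []resolver.Address) resolver.State {
	s := resolver.State{Addresses: backends}
	// Attach the remote load balancer addresses via Attributes; grpclb will
	// read them back with Get.
	return grpclbstate.Set(s, &grpclbstate.State{BalancerAddresses: balancers})
}
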
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
deleted file mode 100644
index 7d66cb491..000000000
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains code internal to the pickfirst package.
-package internal
-
-import (
- rand "math/rand/v2"
- "time"
-)
-
-var (
- // RandShuffle pseudo-randomizes the order of addresses.
- RandShuffle = rand.Shuffle
- // TimeAfterFunc allows mocking the timer for testing connection delay
- // related functionality.
- TimeAfterFunc = func(d time.Duration, f func()) func() {
- timer := time.AfterFunc(d, f)
- return func() { timer.Stop() }
- }
-)
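
Since this is an internal package, only code under the pickfirst subtree can import it. A sketch of how a test there might stub these hooks (the test package and helper are hypothetical):

package pickfirst_test

import (
	"time"

	"google.golang.org/grpc/balancer/pickfirst/internal"
)

func stubHooks() {
	internal.TimeAfterFunc = func(_ time.Duration, f func()) func() {
		f()              // fire immediately instead of after the delay
		return func() {} // the returned cancel func becomes a no-op
	}
	internal.RandShuffle = func(int, func(i, j int)) {} // keep resolver order
}
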
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
deleted file mode 100644
index ea8899818..000000000
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package pickfirst contains the pick_first load balancing policy.
-package pickfirst
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- rand "math/rand/v2"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/pickfirst/internal"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
- internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-
- _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
-)
-
-func init() {
- if envconfig.NewPickFirstEnabled {
- return
- }
- balancer.Register(pickfirstBuilder{})
-}
-
-var logger = grpclog.Component("pick-first-lb")
-
-const (
- // Name is the name of the pick_first balancer.
- Name = "pick_first"
- logPrefix = "[pick-first-lb %p] "
-)
-
-type pickfirstBuilder struct{}
-
-func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{cc: cc}
- b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
- return b
-}
-
-func (pickfirstBuilder) Name() string {
- return Name
-}
-
-type pfConfig struct {
- serviceconfig.LoadBalancingConfig `json:"-"`
-
- // If set to true, instructs the LB policy to shuffle the order of the list
- // of endpoints received from the name resolver before attempting to
- // connect to them.
- ShuffleAddressList bool `json:"shuffleAddressList"`
-}
-
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
- }
- return cfg, nil
-}
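
For reference, a sketch of the service config JSON that reaches ParseConfig above; shuffleAddressList is the knob defined by gRFC A62, and WithDefaultServiceConfig is one way to supply it:

const exampleServiceConfig = `{
  "loadBalancingConfig": [ { "pick_first": { "shuffleAddressList": true } } ]
}`

// e.g.: grpc.NewClient(target, grpc.WithDefaultServiceConfig(exampleServiceConfig))
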
-
-type pickfirstBalancer struct {
- logger *internalgrpclog.PrefixLogger
- state connectivity.State
- cc balancer.ClientConn
- subConn balancer.SubConn
-}
-
-func (b *pickfirstBalancer) ResolverError(err error) {
- if b.logger.V(2) {
- b.logger.Infof("Received error from the name resolver: %v", err)
- }
- if b.subConn == nil {
- b.state = connectivity.TransientFailure
- }
-
- if b.state != connectivity.TransientFailure {
- // The picker will not change since the balancer does not currently
- // report an error.
- return
- }
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
- })
-}
-
-// Shuffler is an interface for shuffling an address list.
-type Shuffler interface {
- ShuffleAddressListForTesting(n int, swap func(i, j int))
-}
-
-// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
-// is the number of elements. swap swaps the elements with indexes i and j.
-func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
-
-func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // The resolver reported an empty address list. Treat it like an error by
- // calling b.ResolverError.
- if b.subConn != nil {
- // Shut down the old subConn. All addresses were removed, so it is
- // no longer valid.
- b.subConn.Shutdown()
- b.subConn = nil
- }
- b.ResolverError(errors.New("produced zero addresses"))
- return balancer.ErrBadResolverState
- }
- // We don't have to guard this block with the env var because ParseConfig
- // already does so.
- cfg, ok := state.BalancerConfig.(pfConfig)
- if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
- }
-
- if b.logger.V(2) {
- b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
- }
-
- var addrs []resolver.Address
- if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling will
- // change the order of endpoints but not touch the order of the addresses
- // within each endpoint. - A61
- if cfg.ShuffleAddressList {
- endpoints = append([]resolver.Endpoint{}, endpoints...)
- internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
- }
-
- // "Flatten the list by concatenating the ordered list of addresses for each
- // of the endpoints, in order." - A61
- for _, endpoint := range endpoints {
- // "In the flattened list, interleave addresses from the two address
- // families, as per RFC-8305 section 4." - A61
- // TODO: support the above language.
- addrs = append(addrs, endpoint.Addresses...)
- }
- } else {
- // Endpoints are not set, so process addresses until we migrate resolver
- // emissions fully to Endpoints. The top channel does wrap emitted
- // addresses with endpoints, but some balancers, such as weighted
- // target, do not forward the correct corresponding endpoints down, or
- // do not split endpoints properly. Once all balancers correctly forward
- // endpoints down, this else branch can be deleted.
- addrs = state.ResolverState.Addresses
- if cfg.ShuffleAddressList {
- addrs = append([]resolver.Address{}, addrs...)
- rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
- }
- }
-
- if b.subConn != nil {
- b.cc.UpdateAddresses(b.subConn, addrs)
- return nil
- }
-
- var subConn balancer.SubConn
- subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(subConn, state)
- },
- })
- if err != nil {
- if b.logger.V(2) {
- b.logger.Infof("Failed to create new SubConn: %v", err)
- }
- b.state = connectivity.TransientFailure
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
- })
- return balancer.ErrBadResolverState
- }
- b.subConn = subConn
- b.state = connectivity.Idle
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.subConn.Connect()
- return nil
-}
-
-// UpdateSubConnState is unused as a StateListener is always registered when
-// creating SubConns.
-func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
-}
-
-func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- if b.logger.V(2) {
- b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
- }
- if b.subConn != subConn {
- if b.logger.V(2) {
- b.logger.Infof("Ignored state change because subConn is not recognized")
- }
- return
- }
- if state.ConnectivityState == connectivity.Shutdown {
- b.subConn = nil
- return
- }
-
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
- })
- case connectivity.Connecting:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. See A62.
- return
- }
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- case connectivity.Idle:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. Also kick the
- // subConn out of Idle into Connecting. See A62.
- b.subConn.Connect()
- return
- }
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &idlePicker{subConn: subConn},
- })
- case connectivity.TransientFailure:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{err: state.ConnectionError},
- })
- }
- b.state = state.ConnectivityState
-}
-
-func (b *pickfirstBalancer) Close() {
-}
-
-func (b *pickfirstBalancer) ExitIdle() {
- if b.subConn != nil && b.state == connectivity.Idle {
- b.subConn.Connect()
- }
-}
-
-type picker struct {
- result balancer.PickResult
- err error
-}
-
-func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- return p.result, p.err
-}
-
-// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
-// CONNECTING when Pick is called.
-type idlePicker struct {
- subConn balancer.SubConn
-}
-
-func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.subConn.Connect()
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
deleted file mode 100644
index 113181e6b..000000000
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ /dev/null
@@ -1,932 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package pickfirstleaf contains the pick_first load balancing policy which
-// will be the universal leaf policy after dualstack changes are implemented.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package pickfirstleaf
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net"
- "net/netip"
- "sync"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/pickfirst/internal"
- "google.golang.org/grpc/connectivity"
- expstats "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
- internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-func init() {
- if envconfig.NewPickFirstEnabled {
- // Register as the default pick_first balancer.
- Name = "pick_first"
- }
- balancer.Register(pickfirstBuilder{})
-}
-
-type (
- // enableHealthListenerKeyType is a unique key type used in resolver
- // attributes to indicate whether the health listener usage is enabled.
- enableHealthListenerKeyType struct{}
- // managedByPickfirstKeyType is an attribute key type to inform Outlier
- // Detection that the generic health listener is being used.
- // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
- // implementing the dualstack design. This is a hack. Once Dualstack is
- // completed, outlier detection will stop sending ejection updates through
- // the connectivity listener.
- managedByPickfirstKeyType struct{}
-)
-
-var (
- logger = grpclog.Component("pick-first-leaf-lb")
- // Name is the name of the pick_first_leaf balancer.
- // It is changed to "pick_first" in init() if this balancer is to be
- // registered as the default pickfirst.
- Name = "pick_first_leaf"
- disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.disconnections",
- Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
- Unit: "disconnection",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_succeeded",
- Description: "EXPERIMENTAL. Number of successful connection attempts.",
- Unit: "attempt",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_failed",
- Description: "EXPERIMENTAL. Number of failed connection attempts.",
- Unit: "attempt",
- Labels: []string{"grpc.target"},
- Default: false,
- })
-)
-
-const (
- // TODO: change to pick-first when this becomes the default pick_first policy.
- logPrefix = "[pick-first-leaf-lb %p] "
- // connectionDelayInterval is the time to wait for during the happy eyeballs
- // pass before starting the next connection attempt.
- connectionDelayInterval = 250 * time.Millisecond
-)
-
-type ipAddrFamily int
-
-const (
- // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
- // address.
- ipAddrFamilyUnknown ipAddrFamily = iota
- ipAddrFamilyV4
- ipAddrFamilyV6
-)
-
-type pickfirstBuilder struct{}
-
-func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{
- cc: cc,
- target: bo.Target.String(),
- metricsRecorder: cc.MetricsRecorder(),
-
- subConns: resolver.NewAddressMap(),
- state: connectivity.Connecting,
- cancelConnectionTimer: func() {},
- }
- b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
- return b
-}
-
-func (b pickfirstBuilder) Name() string {
- return Name
-}
-
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
- }
- return cfg, nil
-}
-
-// EnableHealthListener updates the state to configure pickfirst for using a
-// generic health listener.
-func EnableHealthListener(state resolver.State) resolver.State {
- state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
- return state
-}
-
-// IsManagedByPickfirst returns whether an address belongs to a SubConn
-// managed by the pickfirst LB policy.
-// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
-// outlier_detection via the connectivity listener when using pick_first.
-// Once Dualstack changes are complete, all SubConns will be created by
-// pick_first and outlier detection will only use the health listener for
-// ejection. This hack can then be removed.
-func IsManagedByPickfirst(addr resolver.Address) bool {
- return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
-}
-
-type pfConfig struct {
- serviceconfig.LoadBalancingConfig `json:"-"`
-
- // If set to true, instructs the LB policy to shuffle the order of the list
- // of endpoints received from the name resolver before attempting to
- // connect to them.
- ShuffleAddressList bool `json:"shuffleAddressList"`
-}
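The ShuffleAddressList knob arrives through the channel's service config. Below is a minimal sketch of the JSON shape that ParseConfig decodes, using a stand-in struct (the real pfConfig also embeds serviceconfig.LoadBalancingConfig); the config literal is a plausible example, not taken from this repository.

package main

import (
	"encoding/json"
	"fmt"
)

// wireConfig mirrors pfConfig's JSON wire shape, for illustration only.
type wireConfig struct {
	ShuffleAddressList bool `json:"shuffleAddressList"`
}

func main() {
	// A plausible pick_first entry from a service config's
	// loadBalancingConfig list.
	js := []byte(`{"shuffleAddressList": true}`)
	var c wireConfig
	if err := json.Unmarshal(js, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.ShuffleAddressList) // true
}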
-
-// scData keeps track of the current state of the subConn.
-// It is not safe for concurrent access.
-type scData struct {
- // The following fields are initialized at build time and read-only after
- // that.
- subConn balancer.SubConn
- addr resolver.Address
-
- rawConnectivityState connectivity.State
- // The effective connectivity state based on raw connectivity, health state
- // and after following sticky TransientFailure behaviour defined in A62.
- effectiveState connectivity.State
- lastErr error
- connectionFailedInFirstPass bool
-}
-
-func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
- addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
- sd := &scData{
- rawConnectivityState: connectivity.Idle,
- effectiveState: connectivity.Idle,
- addr: addr,
- }
- sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(sd, state)
- },
- })
- if err != nil {
- return nil, err
- }
- sd.subConn = sc
- return sd, nil
-}
-
-type pickfirstBalancer struct {
- // The following fields are initialized at build time and read-only after
- // that and therefore do not need to be guarded by a mutex.
- logger *internalgrpclog.PrefixLogger
- cc balancer.ClientConn
- target string
- metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil
-
- // The mutex is used to synchronize updates triggered from the idle
- // picker with the already-serialized resolver and SubConn state
- // updates.
- mu sync.Mutex
- // State reported to the channel based on SubConn states and resolver
- // updates.
- state connectivity.State
- // scData for active subconns mapped by address.
- subConns *resolver.AddressMap
- addressList addressList
- firstPass bool
- numTF int
- cancelConnectionTimer func()
- healthCheckingEnabled bool
-}
-
-// ResolverError is called by the ClientConn when the name resolver produces
-// an error or when pickfirst determined the resolver update to be invalid.
-func (b *pickfirstBalancer) ResolverError(err error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.resolverErrorLocked(err)
-}
-
-func (b *pickfirstBalancer) resolverErrorLocked(err error) {
- if b.logger.V(2) {
- b.logger.Infof("Received error from the name resolver: %v", err)
- }
-
- // The picker will not change since the balancer does not currently
- // report an error. If the balancer hasn't received a single good resolver
- // update yet, transition to TRANSIENT_FAILURE.
- if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
- if b.logger.V(2) {
- b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
- }
- return
- }
-
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
- })
-}
-
-func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.cancelConnectionTimer()
- if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // Cleanup state pertaining to the previous resolver state.
- // Treat an empty address list like an error by calling b.ResolverError.
- b.closeSubConnsLocked()
- b.addressList.updateAddrs(nil)
- b.resolverErrorLocked(errors.New("produced zero addresses"))
- return balancer.ErrBadResolverState
- }
- b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
- cfg, ok := state.BalancerConfig.(pfConfig)
- if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
- }
-
- if b.logger.V(2) {
- b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
- }
-
- var newAddrs []resolver.Address
- if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling
- // will change the order of endpoints but not touch the order of the
- // addresses within each endpoint. - A61
- if cfg.ShuffleAddressList {
- endpoints = append([]resolver.Endpoint{}, endpoints...)
- internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
- }
-
- // "Flatten the list by concatenating the ordered list of addresses for
- // each of the endpoints, in order." - A61
- for _, endpoint := range endpoints {
- newAddrs = append(newAddrs, endpoint.Addresses...)
- }
- } else {
- // Endpoints not set; process addresses until resolver emissions are
- // fully migrated to Endpoints. The top channel does wrap emitted
- // addresses with endpoints; however, some balancers, such as weighted
- // target, do not forward the correct endpoints down or split endpoints
- // properly. Once all balancers forward endpoints correctly, this else
- // branch can be deleted.
- newAddrs = state.ResolverState.Addresses
- if cfg.ShuffleAddressList {
- newAddrs = append([]resolver.Address{}, newAddrs...)
- internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
- }
- }
-
- // If an address appears in multiple endpoints or in the same endpoint
- // multiple times, we keep it only once. We will create only one SubConn
- // for the address because an AddressMap is used to store SubConns.
- // Not de-duplicating would result in attempting to connect to the same
- // SubConn multiple times in the same pass. We don't want this.
- newAddrs = deDupAddresses(newAddrs)
- newAddrs = interleaveAddresses(newAddrs)
-
- prevAddr := b.addressList.currentAddress()
- prevSCData, found := b.subConns.Get(prevAddr)
- prevAddrsCount := b.addressList.size()
- isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
- b.addressList.updateAddrs(newAddrs)
-
- // If the previous ready SubConn exists in new address list,
- // keep this connection and don't create new SubConns.
- if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
- return nil
- }
-
- b.reconcileSubConnsLocked(newAddrs)
- // If it's the first resolver update or the balancer was already READY
- // (but the new address list does not contain the ready SubConn) or
- // CONNECTING, enter CONNECTING.
- // We may be in TRANSIENT_FAILURE due to a previous empty address list;
- // we should still enter CONNECTING because the sticky TF behaviour
- // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
- // due to connectivity failures.
- if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
- // Start connection attempt at first address.
- b.forceUpdateConcludedStateLocked(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.startFirstPassLocked()
- } else if b.state == connectivity.TransientFailure {
- // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
- // we're READY. See A62.
- b.startFirstPassLocked()
- }
- return nil
-}
-
-// UpdateSubConnState is unused as a StateListener is always registered when
-// creating SubConns.
-func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
-}
-
-func (b *pickfirstBalancer) Close() {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.closeSubConnsLocked()
- b.cancelConnectionTimer()
- b.state = connectivity.Shutdown
-}
-
-// ExitIdle moves the balancer out of idle state. It can be called concurrently
-// by the idlePicker and clientConn so access to variables should be
-// synchronized.
-func (b *pickfirstBalancer) ExitIdle() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.state == connectivity.Idle {
- b.startFirstPassLocked()
- }
-}
-
-func (b *pickfirstBalancer) startFirstPassLocked() {
- b.firstPass = true
- b.numTF = 0
- // Reset the connection attempt record for existing SubConns.
- for _, sd := range b.subConns.Values() {
- sd.(*scData).connectionFailedInFirstPass = false
- }
- b.requestConnectionLocked()
-}
-
-func (b *pickfirstBalancer) closeSubConnsLocked() {
- for _, sd := range b.subConns.Values() {
- sd.(*scData).subConn.Shutdown()
- }
- b.subConns = resolver.NewAddressMap()
-}
-
-// deDupAddresses ensures that each address appears only once in the slice.
-func deDupAddresses(addrs []resolver.Address) []resolver.Address {
- seenAddrs := resolver.NewAddressMap()
- retAddrs := []resolver.Address{}
-
- for _, addr := range addrs {
- if _, ok := seenAddrs.Get(addr); ok {
- continue
- }
- seenAddrs.Set(addr, true)
- retAddrs = append(retAddrs, addr)
- }
- return retAddrs
-}
-
-// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
-// as per RFC-8305 section 4.
-// Whichever address family is first in the list is followed by an address of
-// the other address family; that is, if the first address in the list is IPv6,
-// then the first IPv4 address should be moved up in the list to be second in
-// the list. It doesn't support configuring "First Address Family Count", i.e.
-// there will always be a single member of the first address family at the
-// beginning of the interleaved list.
-// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
-// "unknown" family for interleaving.
-// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
-func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
- familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
- interleavingOrder := []ipAddrFamily{}
- for _, addr := range addrs {
- family := addressFamily(addr.Addr)
- if _, found := familyAddrsMap[family]; !found {
- interleavingOrder = append(interleavingOrder, family)
- }
- familyAddrsMap[family] = append(familyAddrsMap[family], addr)
- }
-
- interleavedAddrs := make([]resolver.Address, 0, len(addrs))
-
- for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
- // Some IP types may have fewer addresses than others, so we look for
- // the next type that has a remaining member to add to the interleaved
- // list.
- family := interleavingOrder[curFamilyIdx]
- remainingMembers := familyAddrsMap[family]
- if len(remainingMembers) > 0 {
- interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
- familyAddrsMap[family] = remainingMembers[1:]
- }
- }
-
- return interleavedAddrs
-}
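A self-contained sketch of the same round-robin interleaving over plain strings, with hypothetical sample addresses pre-grouped by family in first-seen order; note how the family with fewer members simply runs dry while the other keeps filling slots.

package main

import "fmt"

func main() {
	// Hypothetical addresses pre-grouped by family in first-seen order,
	// mirroring what interleaveAddresses builds from its input.
	order := []string{"v6", "v4"}
	buckets := map[string][]string{
		"v6": {"[2001:db8::1]:443", "[2001:db8::2]:443", "[2001:db8::3]:443"},
		"v4": {"192.0.2.1:443"},
	}
	total := 0
	for _, b := range buckets {
		total += len(b)
	}
	var out []string
	// Round-robin across families; a family that runs dry is skipped.
	for i := 0; len(out) < total; i = (i + 1) % len(order) {
		f := order[i]
		if len(buckets[f]) > 0 {
			out = append(out, buckets[f][0])
			buckets[f] = buckets[f][1:]
		}
	}
	fmt.Println(out)
	// [[2001:db8::1]:443 192.0.2.1:443 [2001:db8::2]:443 [2001:db8::3]:443]
}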
-
-// addressFamily returns the ipAddrFamily after parsing the address string.
-// If the address isn't of the format "ip-address:port", it returns
-// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
-// using a resolver like passthrough where the address may be a hostname in
-// some format that the dialer can resolve.
-func addressFamily(address string) ipAddrFamily {
- // Parse the IP after removing the port.
- host, _, err := net.SplitHostPort(address)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- ip, err := netip.ParseAddr(host)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- switch {
- case ip.Is4() || ip.Is4In6():
- return ipAddrFamilyV4
- case ip.Is6():
- return ipAddrFamilyV6
- default:
- return ipAddrFamilyUnknown
- }
-}
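A runnable sketch of the same classification on hypothetical inputs; the IPv4-mapped IPv6 literal lands in the IPv4 family via the Is4In6 branch, and a hostname (as seen with a passthrough-style resolver) falls through to unknown.

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	inputs := []string{
		"192.0.2.1:443",
		"[2001:db8::1]:443",
		"[::ffff:192.0.2.1]:443", // IPv4-mapped: classified as IPv4
		"example.com:443",        // hostname: classified as unknown
	}
	for _, in := range inputs {
		host, _, err := net.SplitHostPort(in)
		if err != nil {
			fmt.Println(in, "-> unknown")
			continue
		}
		ip, err := netip.ParseAddr(host)
		switch {
		case err != nil:
			fmt.Println(in, "-> unknown")
		case ip.Is4() || ip.Is4In6():
			fmt.Println(in, "-> IPv4")
		case ip.Is6():
			fmt.Println(in, "-> IPv6")
		}
	}
}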
-
-// reconcileSubConnsLocked updates the active subchannels based on a new address
-// list from the resolver. It does this by:
-// - closing subchannels: any existing subchannels associated with addresses
-// that are no longer in the updated list are shut down.
-// - removing subchannels: entries for these closed subchannels are removed
-// from the subchannel map.
-//
-// This ensures that the subchannel map accurately reflects the current set of
-// addresses received from the name resolver.
-func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
- newAddrsMap := resolver.NewAddressMap()
- for _, addr := range newAddrs {
- newAddrsMap.Set(addr, true)
- }
-
- for _, oldAddr := range b.subConns.Keys() {
- if _, ok := newAddrsMap.Get(oldAddr); ok {
- continue
- }
- val, _ := b.subConns.Get(oldAddr)
- val.(*scData).subConn.Shutdown()
- b.subConns.Delete(oldAddr)
- }
-}
-
-// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
-// becomes ready, which means that all other SubConns must be shut down.
-func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
- b.cancelConnectionTimer()
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
- if sd.subConn != selected.subConn {
- sd.subConn.Shutdown()
- }
- }
- b.subConns = resolver.NewAddressMap()
- b.subConns.Set(selected.addr, selected)
-}
-
-// requestConnectionLocked starts connecting on the subchannel corresponding to
-// the current address. If no subchannel exists, one is created. If the current
-// subchannel is in TransientFailure, a connection to the next address is
-// attempted until a subchannel is found.
-func (b *pickfirstBalancer) requestConnectionLocked() {
- if !b.addressList.isValid() {
- return
- }
- var lastErr error
- for valid := true; valid; valid = b.addressList.increment() {
- curAddr := b.addressList.currentAddress()
- sd, ok := b.subConns.Get(curAddr)
- if !ok {
- var err error
- // We want to assign the new scData to sd from the outer scope,
- // hence we can't use := below.
- sd, err = b.newSCData(curAddr)
- if err != nil {
- // This should never happen, unless the clientConn is being shut
- // down.
- if b.logger.V(2) {
- b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
- }
- // Do nothing, the LB policy will be closed soon.
- return
- }
- b.subConns.Set(curAddr, sd)
- }
-
- scd := sd.(*scData)
- switch scd.rawConnectivityState {
- case connectivity.Idle:
- scd.subConn.Connect()
- b.scheduleNextConnectionLocked()
- return
- case connectivity.TransientFailure:
- // The SubConn is being re-used and failed during a previous pass
- // over the addressList. It has not completed backoff yet.
- // Mark it as having failed and try the next address.
- scd.connectionFailedInFirstPass = true
- lastErr = scd.lastErr
- continue
- case connectivity.Connecting:
- // Wait for the connection attempt to complete or the timer to fire
- // before attempting the next address.
- b.scheduleNextConnectionLocked()
- return
- default:
- b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
- return
- }
- }
-
- // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
- // first pass if possible.
- b.endFirstPassIfPossibleLocked(lastErr)
-}
-
-func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
- b.cancelConnectionTimer()
- if !b.addressList.hasNext() {
- return
- }
- curAddr := b.addressList.currentAddress()
- cancelled := false // Access to this is protected by the balancer's mutex.
- closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
- b.mu.Lock()
- defer b.mu.Unlock()
- // If the scheduled task is cancelled while acquiring the mutex, return.
- if cancelled {
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
- }
- if b.addressList.increment() {
- b.requestConnectionLocked()
- }
- })
- // Access to the cancellation callback held by the balancer is guarded by
- // the balancer's mutex, so it's safe to set the boolean from the callback.
- b.cancelConnectionTimer = sync.OnceFunc(func() {
- cancelled = true
- closeFn()
- })
-}
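The same cancellable-timer shape can be reproduced with the standard library alone. A minimal sketch, assuming time.AfterFunc stands in for the injectable internal.TimeAfterFunc:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.Mutex
	cancelled := false // guarded by mu, like the balancer's flag

	timer := time.AfterFunc(250*time.Millisecond, func() {
		mu.Lock()
		defer mu.Unlock()
		// If cancellation won the race while we waited for the lock,
		// do nothing.
		if cancelled {
			return
		}
		fmt.Println("timer fired: try the next address")
	})

	// sync.OnceFunc makes the cancel callback safe to call repeatedly.
	cancel := sync.OnceFunc(func() {
		mu.Lock()
		cancelled = true
		mu.Unlock()
		timer.Stop()
	})

	cancel() // comment this out to see the timer fire
	time.Sleep(300 * time.Millisecond)
}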
-
-func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- oldState := sd.rawConnectivityState
- sd.rawConnectivityState = newState.ConnectivityState
- // Previously relevant SubConns can still call back with state updates.
- // To prevent pickers from returning these obsolete SubConns, check that
- // this SubConn is still in the current list of active SubConns.
- if !b.isActiveSCData(sd) {
- return
- }
- if newState.ConnectivityState == connectivity.Shutdown {
- sd.effectiveState = connectivity.Shutdown
- return
- }
-
- // Record a connection attempt when exiting CONNECTING.
- if newState.ConnectivityState == connectivity.TransientFailure {
- sd.connectionFailedInFirstPass = true
- connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
- }
-
- if newState.ConnectivityState == connectivity.Ready {
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- b.shutdownRemainingLocked(sd)
- if !b.addressList.seekTo(sd.addr) {
- // This should not fail as we should have only one SubConn after
- // entering READY. The SubConn should be present in the addressList.
- b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
- return
- }
- if !b.healthCheckingEnabled {
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
- }
-
- sd.effectiveState = connectivity.Ready
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
- }
- // Send a CONNECTING update to take the SubConn out of sticky-TF if
- // required.
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
- b.updateSubConnHealthState(sd, scs)
- })
- return
- }
-
- // If the LB policy is READY, and it receives a subchannel state change,
- // it means that the READY subchannel has failed.
- // A SubConn can also transition from CONNECTING directly to IDLE when
- // a transport is successfully created, but the connection fails
- // before the SubConn can send the notification for READY. We treat
- // this as a successful connection and transition to IDLE.
- // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
- // part of the if condition below once the issue is fixed.
- if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
- // Once a transport fails, the balancer enters IDLE and starts from
- // the first address when the picker is used.
- b.shutdownRemainingLocked(sd)
- sd.effectiveState = newState.ConnectivityState
- // A READY update may be interleaved between CONNECTING and IDLE; we
- // need to account for that.
- if oldState == connectivity.Connecting {
- // A known issue (https://github.com/grpc/grpc-go/issues/7862)
- // causes a race that prevents the READY state change notification.
- // This works around it.
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- }
- disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
- b.addressList.reset()
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Idle,
- Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
- })
- return
- }
-
- if b.firstPass {
- switch newState.ConnectivityState {
- case connectivity.Connecting:
- // The effective state can be in either IDLE, CONNECTING or
- // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
- // TRANSIENT_FAILURE until it's READY. See A62.
- if sd.effectiveState != connectivity.TransientFailure {
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- }
- case connectivity.TransientFailure:
- sd.lastErr = newState.ConnectionError
- sd.effectiveState = connectivity.TransientFailure
- // Since we're re-using common SubConns while handling resolver
- // updates, we could receive an out of turn TRANSIENT_FAILURE from
- // a pass over the previous address list. Happy Eyeballs will also
- // cause out of order updates to arrive.
-
- if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
- b.cancelConnectionTimer()
- if b.addressList.increment() {
- b.requestConnectionLocked()
- return
- }
- }
-
- // End the first pass if we've seen a TRANSIENT_FAILURE from all
- // SubConns once.
- b.endFirstPassIfPossibleLocked(newState.ConnectionError)
- }
- return
- }
-
- // We have finished the first pass, keep re-connecting failing SubConns.
- switch newState.ConnectivityState {
- case connectivity.TransientFailure:
- b.numTF = (b.numTF + 1) % b.subConns.Len()
- sd.lastErr = newState.ConnectionError
- if b.numTF%b.subConns.Len() == 0 {
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: newState.ConnectionError},
- })
- }
- // We don't need to request re-resolution since the SubConn already
- // does that before reporting TRANSIENT_FAILURE.
- // TODO: #7534 - Move re-resolution requests from SubConn into
- // pick_first.
- case connectivity.Idle:
- sd.subConn.Connect()
- }
-}
-
-// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
-// addresses have been tried and their SubConns have reported a failure.
-func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
- // An optimization to avoid iterating over the entire SubConn map.
- if b.addressList.isValid() {
- return
- }
- // Connect() has been called on all the SubConns. The first pass can be
- // ended if all the SubConns have reported a failure.
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
- if !sd.connectionFailedInFirstPass {
- return
- }
- }
- b.firstPass = false
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: lastErr},
- })
- // Start re-connecting all the SubConns that are already in IDLE.
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
- if sd.rawConnectivityState == connectivity.Idle {
- sd.subConn.Connect()
- }
- }
-}
-
-func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
- activeSD, found := b.subConns.Get(sd.addr)
- return found && activeSD == sd
-}
-
-func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- // Previously relevant SubConns can still call back with state updates.
- // To prevent pickers from returning these obsolete SubConns, check that
- // this SubConn is still in the current list of active SubConns.
- if !b.isActiveSCData(sd) {
- return
- }
- sd.effectiveState = state.ConnectivityState
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- case connectivity.TransientFailure:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
- })
- case connectivity.Connecting:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- default:
- b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
- }
-}
-
-// updateBalancerState stores the state reported to the channel and calls
-// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
-// updates to the channel.
-func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
- // In case of TransientFailures allow the picker to be updated to update
- // the connectivity error, in all other cases don't send duplicate state
- // updates.
- if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
- return
- }
- b.forceUpdateConcludedStateLocked(newState)
-}
-
-// forceUpdateConcludedStateLocked stores the state reported to the channel and
-// calls ClientConn.UpdateState().
-// A separate function is defined to force-update the ClientConn state since the
-// channel doesn't assume that LB policies start in CONNECTING; it relies on the
-// LB policy to send an initial CONNECTING update.
-func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
- b.state = newState.ConnectivityState
- b.cc.UpdateState(newState)
-}
-
-type picker struct {
- result balancer.PickResult
- err error
-}
-
-func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- return p.result, p.err
-}
-
-// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
-// CONNECTING when Pick is called.
-type idlePicker struct {
- exitIdle func()
-}
-
-func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.exitIdle()
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-}
-
-// addressList manages sequentially iterating over addresses present in a list
-// of endpoints. It provides a 1 dimensional view of the addresses present in
-// the endpoints.
-// This type is not safe for concurrent access.
-type addressList struct {
- addresses []resolver.Address
- idx int
-}
-
-func (al *addressList) isValid() bool {
- return al.idx < len(al.addresses)
-}
-
-func (al *addressList) size() int {
- return len(al.addresses)
-}
-
-// increment moves to the next index in the address list.
-// This method returns false if it went off the list, true otherwise.
-func (al *addressList) increment() bool {
- if !al.isValid() {
- return false
- }
- al.idx++
- return al.idx < len(al.addresses)
-}
-
-// currentAddress returns the current address pointed to in the addressList.
-// If the list is in an invalid state, it returns an empty address instead.
-func (al *addressList) currentAddress() resolver.Address {
- if !al.isValid() {
- return resolver.Address{}
- }
- return al.addresses[al.idx]
-}
-
-func (al *addressList) reset() {
- al.idx = 0
-}
-
-func (al *addressList) updateAddrs(addrs []resolver.Address) {
- al.addresses = addrs
- al.reset()
-}
-
-// seekTo moves the index to the first entry matching the given address. It
-// returns false, leaving the index unchanged, if the address was not found.
-func (al *addressList) seekTo(needle resolver.Address) bool {
- for ai, addr := range al.addresses {
- if !equalAddressIgnoringBalAttributes(&addr, &needle) {
- continue
- }
- al.idx = ai
- return true
- }
- return false
-}
-
-// hasNext reports whether there is another address after the current one,
-// i.e. whether incrementing the addressList keeps it valid. If the list has
-// already moved past the end, it returns false.
-func (al *addressList) hasNext() bool {
- if !al.isValid() {
- return false
- }
- return al.idx+1 < len(al.addresses)
-}
-
-// equalAddressIgnoringBalAttributes returns true if a and b are considered
-// equal. This is different from the Equal method on the resolver.Address type
-// which considers all fields to determine equality. Here, we only consider
-// fields that are meaningful to the SubConn.
-func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
- return a.Addr == b.Addr && a.ServerName == b.ServerName &&
- a.Attributes.Equal(b.Attributes) &&
- a.Metadata == b.Metadata
-}
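The addressList above is a plain cursor. A stand-alone mirror of its semantics over strings makes the `for valid := true; valid; valid = increment()` idiom used in requestConnectionLocked easy to trace:

package main

import "fmt"

// cursor mirrors addressList's index semantics over plain strings:
// it is valid while idx < len, and increment reports whether it
// stayed valid after advancing.
type cursor struct {
	items []string
	idx   int
}

func (c *cursor) isValid() bool { return c.idx < len(c.items) }

func (c *cursor) hasNext() bool { return c.isValid() && c.idx+1 < len(c.items) }

func (c *cursor) increment() bool {
	if !c.isValid() {
		return false
	}
	c.idx++
	return c.isValid()
}

func main() {
	c := &cursor{items: []string{"a", "b", "c"}}
	for valid := true; valid; valid = c.increment() {
		fmt.Println(c.items[c.idx], "hasNext:", c.hasNext())
	}
	// a hasNext: true / b hasNext: true / c hasNext: false
	fmt.Println("valid after exhausting:", c.isValid()) // false
}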
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
deleted file mode 100644
index 35da5d1ec..000000000
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package roundrobin defines a roundrobin balancer. The roundrobin balancer is
-// installed as one of the default balancers in gRPC; users don't need to
-// explicitly install this balancer.
-package roundrobin
-
-import (
- "fmt"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/endpointsharding"
- "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
- "google.golang.org/grpc/grpclog"
- internalgrpclog "google.golang.org/grpc/internal/grpclog"
-)
-
-// Name is the name of round_robin balancer.
-const Name = "round_robin"
-
-var logger = grpclog.Component("roundrobin")
-
-func init() {
- balancer.Register(builder{})
-}
-
-type builder struct{}
-
-func (bb builder) Name() string {
- return Name
-}
-
-func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
- childBuilder := balancer.Get(pickfirstleaf.Name).Build
- bal := &rrBalancer{
- cc: cc,
- Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
- }
- bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal))
- bal.logger.Infof("Created")
- return bal
-}
-
-type rrBalancer struct {
- balancer.Balancer
- cc balancer.ClientConn
- logger *internalgrpclog.PrefixLogger
-}
-
-func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
- return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
- // Enable the health listener in pickfirst children for client side health
- // checks and outlier detection, if configured.
- ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
- })
-}
-
-func (b *rrBalancer) ExitIdle() {
- // The type assertion should always succeed, as the child is endpointsharding.
- if ei, ok := b.Balancer.(balancer.ExitIdler); ok {
- ei.ExitIdle()
- }
-}
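For orientation, this is roughly how an application opts into this balancer; the target is hypothetical, and selecting the policy through a default service config is the documented route, assuming a recent grpc-go:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Create a channel whose default service config selects round_robin.
	conn, err := grpc.NewClient("dns:///example.internal:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}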
diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go
deleted file mode 100644
index 9ee44d4af..000000000
--- a/vendor/google.golang.org/grpc/balancer/subconn.go
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package balancer
-
-import (
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/resolver"
-)
-
-// A SubConn represents a single connection to a gRPC backend service.
-//
-// All SubConns start in IDLE, and will not try to connect. To trigger a
-// connection attempt, Balancers must call Connect.
-//
-// If the connection attempt fails, the SubConn will transition to
-// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the
-// connection attempt succeeds, it will transition to READY.
-//
-// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
-//
-// If a connection re-enters IDLE, Balancers must call Connect again to trigger
-// a new connection attempt.
-//
-// Each SubConn contains a list of addresses. gRPC will try to connect to the
-// addresses in sequence, and stop trying the remainder once the first
-// connection is successful. However, this behavior is deprecated. SubConns
-// should only use a single address.
-//
-// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
-// by custom load balancing policies. Users should not need their own complete
-// implementation of this interface -- they should always delegate to a SubConn
-// returned by ClientConn.NewSubConn() by embedding it in their implementations.
-// An embedded SubConn must never be nil, or runtime panics will occur.
-type SubConn interface {
- // UpdateAddresses updates the addresses used in this SubConn.
- // gRPC checks if currently-connected address is still in the new list.
- // If it's in the list, the connection will be kept.
- // If it's not in the list, the connection will gracefully close, and
- // a new connection will be created.
- //
- // This will trigger a state transition for the SubConn.
- //
- // Deprecated: this method will be removed. Create new SubConns for new
- // addresses instead.
- UpdateAddresses([]resolver.Address)
- // Connect starts the connecting for this SubConn.
- Connect()
- // GetOrBuildProducer returns a reference to the existing Producer for this
- // ProducerBuilder in this SubConn, or, if one does not currently exist,
- // creates a new one and returns it. Returns a close function which may be
- // called when the Producer is no longer needed. Otherwise the producer
- // will automatically be closed upon connection loss or subchannel close.
- // Should only be called on a SubConn in state Ready. Otherwise the
- // producer will be unable to create streams.
- GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
- // Shutdown shuts down the SubConn gracefully. Any started RPCs will be
- // allowed to complete. No future calls should be made on the SubConn.
- // One final state update will be delivered to the StateListener (or
- // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
- // indicate the shutdown operation. This may be delivered before
- // in-progress RPCs are complete and the actual connection is closed.
- Shutdown()
- // RegisterHealthListener registers a health listener that receives health
- // updates for a Ready SubConn. Only one health listener can be registered
- // at a time. A health listener should be registered each time the SubConn's
- // connectivity state changes to READY. Registering a health listener when
- // the connectivity state is not READY may result in undefined behaviour.
- // This method must not be called synchronously while handling an update
- // from a previously registered health listener.
- RegisterHealthListener(func(SubConnState))
- // EnforceSubConnEmbedding is included to force implementers to embed
- // another implementation of this interface, allowing gRPC to add methods
- // without breaking users.
- internal.EnforceSubConnEmbedding
-}
-
-// A ProducerBuilder is a simple constructor for a Producer. It is used by the
-// SubConn to create producers when needed.
-type ProducerBuilder interface {
- // Build creates a Producer. The first parameter is always a
- // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
- // associated SubConn), but is declared as `any` to avoid a dependency
- // cycle. Build also returns a close function that will be called when all
- // references to the Producer have been given up for a SubConn, or when a
- // connectivity state change occurs on the SubConn. The close function
- // should always block until all asynchronous cleanup work is completed.
- Build(grpcClientConnInterface any) (p Producer, close func())
-}
-
-// SubConnState describes the state of a SubConn.
-type SubConnState struct {
- // ConnectivityState is the connectivity state of the SubConn.
- ConnectivityState connectivity.State
- // ConnectionError is set if the ConnectivityState is TransientFailure,
- // describing the reason the SubConn failed. Otherwise, it is nil.
- ConnectionError error
- // connectedAddr contains the connected address when ConnectivityState is
- // Ready. Otherwise, it is indeterminate.
- connectedAddress resolver.Address
-}
-
-// connectedAddress returns the connected address for a SubConnState. The
-// address is only valid if the state is READY.
-func connectedAddress(scs SubConnState) resolver.Address {
- return scs.connectedAddress
-}
-
-// setConnectedAddress sets the connected address for a SubConnState.
-func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
- scs.connectedAddress = addr
-}
-
-// A Producer is a type shared among potentially many consumers. It is
-// associated with a SubConn, and an implementation will typically contain
-// other methods to provide additional functionality, e.g. configuration or
-// subscription registration.
-type Producer any
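A hedged sketch of a ProducerBuilder implementation, with hypothetical names; a real producer would typically start a background stream on the SubConn passed to Build.

package main

import (
	"sync"

	"google.golang.org/grpc/balancer"
)

// pingProducer is a hypothetical Producer; here it only tracks its own
// lifecycle for illustration.
type pingProducer struct {
	mu     sync.Mutex
	closed bool
}

type pingProducerBuilder struct{}

// Build receives the SubConn as a grpc.ClientConnInterface, declared `any`
// to avoid the dependency cycle noted above, and returns a close function
// that must block until all asynchronous cleanup is done.
func (pingProducerBuilder) Build(grpcClientConnInterface any) (balancer.Producer, func()) {
	p := &pingProducer{}
	return p, func() {
		p.mu.Lock()
		defer p.mu.Unlock()
		p.closed = true
	}
}

func main() {
	// A balancer would call sc.GetOrBuildProducer(pingProducerBuilder{})
	// on a READY SubConn; the same builder always yields the same producer.
	var _ balancer.ProducerBuilder = pingProducerBuilder{}
}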
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
deleted file mode 100644
index 948a21ef6..000000000
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "fmt"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/balancer/gracefulswitch"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/status"
-)
-
-var (
- setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
- // noOpRegisterHealthListenerFn is used when client side health checking is
- // disabled. It sends a single READY update on the registered listener.
- noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
- listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
- return func() {}
- }
-)
-
-// ccBalancerWrapper sits between the ClientConn and the Balancer.
-//
-// ccBalancerWrapper implements methods corresponding to the ones on the
-// balancer.Balancer interface. The ClientConn is free to call these methods
-// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
-// to the Balancer happen in order by performing them in the serializer, without
-// any mutexes held.
-//
-// ccBalancerWrapper also implements the balancer.ClientConn interface and is
-// passed to the Balancer implementations. It invokes unexported methods on the
-// ClientConn to handle these calls from the Balancer.
-//
-// It uses the gracefulswitch.Balancer internally to ensure that balancer
-// switches happen in a graceful manner.
-type ccBalancerWrapper struct {
- internal.EnforceClientConnEmbedding
- // The following fields are initialized when the wrapper is created and are
- // read-only afterwards, and therefore can be accessed without a mutex.
- cc *ClientConn
- opts balancer.BuildOptions
- serializer *grpcsync.CallbackSerializer
- serializerCancel context.CancelFunc
-
- // The following fields are only accessed within the serializer or during
- // initialization.
- curBalancerName string
- balancer *gracefulswitch.Balancer
-
- // The following field is protected by mu. Caller must take cc.mu before
- // taking mu.
- mu sync.Mutex
- closed bool
-}
-
-// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
-// underlying balancer is not created until the updateClientConnState() method
-// is invoked.
-func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
- ctx, cancel := context.WithCancel(cc.ctx)
- ccb := &ccBalancerWrapper{
- cc: cc,
- opts: balancer.BuildOptions{
- DialCreds: cc.dopts.copts.TransportCredentials,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- Authority: cc.authority,
- CustomUserAgent: cc.dopts.copts.UserAgent,
- ChannelzParent: cc.channelz,
- Target: cc.parsedTarget,
- },
- serializer: grpcsync.NewCallbackSerializer(ctx),
- serializerCancel: cancel,
- }
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
- return ccb
-}
-
-func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
- return ccb.cc.metricsRecorderList
-}
-
-// updateClientConnState is invoked by grpc to push a ClientConnState update to
-// the underlying balancer. This is always executed from the serializer, so
-// it is safe to call into the balancer here.
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
- errCh := make(chan error)
- uccs := func(ctx context.Context) {
- defer close(errCh)
- if ctx.Err() != nil || ccb.balancer == nil {
- return
- }
- name := gracefulswitch.ChildName(ccs.BalancerConfig)
- if ccb.curBalancerName != name {
- ccb.curBalancerName = name
- channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
- }
- err := ccb.balancer.UpdateClientConnState(*ccs)
- if logger.V(2) && err != nil {
- logger.Infof("error from balancer.UpdateClientConnState: %v", err)
- }
- errCh <- err
- }
- onFailure := func() { close(errCh) }
-
- // UpdateClientConnState can race with Close, and when the latter wins, the
- // serializer is closed, and the attempt to schedule the callback will fail.
- // It is acceptable to ignore this failure. But since we want to handle the
- // state update in a blocking fashion (when we successfully schedule the
- // callback), we have to use the ScheduleOr method and not the TrySchedule
- // method on the serializer.
- ccb.serializer.ScheduleOr(uccs, onFailure)
- return <-errCh
-}
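The errCh pattern above generalizes: the scheduled task always closes the channel, the failure path closes it too, and the caller blocks on a single receive. A stand-alone sketch of that contract, with a goroutine playing the serializer's role:

package main

import "fmt"

// scheduleOr mimics the serializer contract assumed above: run the task
// if possible, otherwise invoke onFailure exactly once.
func scheduleOr(task func(), onFailure func(), accepting bool) {
	if !accepting {
		onFailure()
		return
	}
	go task()
}

func callBlocking(accepting bool, work func() error) error {
	errCh := make(chan error, 1)
	scheduleOr(func() {
		defer close(errCh)
		errCh <- work()
	}, func() { close(errCh) }, accepting)
	// Blocks until the task ran or scheduling failed; a closed, empty
	// channel yields a nil error, matching the wrapper's behaviour.
	return <-errCh
}

func main() {
	fmt.Println(callBlocking(true, func() error { return nil }))  // <nil>
	fmt.Println(callBlocking(false, func() error { return nil })) // <nil> (not scheduled)
}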
-
-// resolverError is invoked by grpc to push a resolver error to the underlying
-// balancer. The call to the balancer is executed from the serializer.
-func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.serializer.TrySchedule(func(ctx context.Context) {
- if ctx.Err() != nil || ccb.balancer == nil {
- return
- }
- ccb.balancer.ResolverError(err)
- })
-}
-
-// close initiates async shutdown of the wrapper. cc.mu must be held when
-// calling this function. To determine when the wrapper has finished shutting
-// the channel should block on ccb.serializer.Done() without cc.mu held.
-func (ccb *ccBalancerWrapper) close() {
- ccb.mu.Lock()
- ccb.closed = true
- ccb.mu.Unlock()
- channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
- ccb.serializer.TrySchedule(func(context.Context) {
- if ccb.balancer == nil {
- return
- }
- ccb.balancer.Close()
- ccb.balancer = nil
- })
- ccb.serializerCancel()
-}
-
-// exitIdle invokes the balancer's exitIdle method in the serializer.
-func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.serializer.TrySchedule(func(ctx context.Context) {
- if ctx.Err() != nil || ccb.balancer == nil {
- return
- }
- ccb.balancer.ExitIdle()
- })
-}
-
-func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- ccb.cc.mu.Lock()
- defer ccb.cc.mu.Unlock()
-
- ccb.mu.Lock()
- if ccb.closed {
- ccb.mu.Unlock()
- return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
- }
- ccb.mu.Unlock()
-
- if len(addrs) == 0 {
- return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
- }
- ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
- if err != nil {
- channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
- return nil, err
- }
- acbw := &acBalancerWrapper{
- ccb: ccb,
- ac: ac,
- producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
- stateListener: opts.StateListener,
- healthData: newHealthData(connectivity.Idle),
- }
- ac.acbw = acbw
- return acbw, nil
-}
-
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- // The graceful switch balancer will never call this.
- logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
-}
-
-func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
- acbw, ok := sc.(*acBalancerWrapper)
- if !ok {
- return
- }
- acbw.UpdateAddresses(addrs)
-}
-
-func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
- ccb.cc.mu.Lock()
- defer ccb.cc.mu.Unlock()
- if ccb.cc.conns == nil {
- // The CC has been closed; ignore this update.
- return
- }
-
- ccb.mu.Lock()
- if ccb.closed {
- ccb.mu.Unlock()
- return
- }
- ccb.mu.Unlock()
- // Update picker before updating state. Even though the ordering here does
- // not matter, it can lead to multiple calls of Pick in the common start-up
- // case where we wait for ready and then perform an RPC. If the picker is
- // updated later, we could call the "connecting" picker when the state is
- // updated, and then call the "ready" picker after the picker gets updated.
-
- // Note that there is no need to check if the balancer wrapper was closed,
- // as we know the graceful switch LB policy will not call cc if it has been
- // closed.
- ccb.cc.pickerWrapper.updatePicker(s.Picker)
- ccb.cc.csMgr.updateState(s.ConnectivityState)
-}
-
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
- ccb.cc.mu.RLock()
- defer ccb.cc.mu.RUnlock()
-
- ccb.mu.Lock()
- if ccb.closed {
- ccb.mu.Unlock()
- return
- }
- ccb.mu.Unlock()
- ccb.cc.resolveNowLocked(o)
-}
-
-func (ccb *ccBalancerWrapper) Target() string {
- return ccb.cc.target
-}
-
-// acBalancerWrapper is a wrapper on top of ac for balancers.
-// It implements balancer.SubConn interface.
-type acBalancerWrapper struct {
- internal.EnforceSubConnEmbedding
- ac *addrConn // read-only
- ccb *ccBalancerWrapper // read-only
- stateListener func(balancer.SubConnState)
-
- producersMu sync.Mutex
- producers map[balancer.ProducerBuilder]*refCountedProducer
-
- // Access to healthData is protected by healthMu.
- healthMu sync.Mutex
- // healthData is stored as a pointer to detect when the health listener is
- // dropped or updated. This is required as closures can't be compared for
- // equality.
- healthData *healthData
-}
-
-// healthData holds data related to health state reporting.
-type healthData struct {
- // connectivityState stores the most recent connectivity state delivered
- // to the LB policy. This is stored to avoid sending updates when the
- // SubConn has already exited connectivity state READY.
- connectivityState connectivity.State
- // closeHealthProducer stores function to close the ref counted health
- // producer. The health producer is automatically closed when the SubConn
- // state changes.
- closeHealthProducer func()
-}
-
-func newHealthData(s connectivity.State) *healthData {
- return &healthData{
- connectivityState: s,
- closeHealthProducer: func() {},
- }
-}
-
-// updateState is invoked by grpc to push a subConn state update to the
-// underlying balancer.
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
- acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
- if ctx.Err() != nil || acbw.ccb.balancer == nil {
- return
- }
- // Invalidate all producers on any state change.
- acbw.closeProducers()
-
- // Even though it is optional for balancers, gracefulswitch ensures
- // opts.StateListener is set, so this cannot ever be nil.
- // TODO: delete this comment when UpdateSubConnState is removed.
- scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
- if s == connectivity.Ready {
- setConnectedAddress(&scs, curAddr)
- }
- // Invalidate the health listener by updating the healthData.
- acbw.healthMu.Lock()
- // A race may occur if a health listener is registered soon after the
- // connectivity state is set but before the stateListener is called.
- // Two cases may arise:
- // 1. The new state is not READY: RegisterHealthListener has checks to
- // ensure no updates are sent when the connectivity state is not
- // READY.
- // 2. The new state is READY: This means that the old state wasn't Ready.
- // The RegisterHealthListener API mentions that a health listener
- // must not be registered when a SubConn is not ready to avoid such
- // races. When this happens, the LB policy would get health updates
- // on the old listener. When the LB policy registers a new listener
- // on receiving the connectivity update, the health updates will be
- // sent to the new health listener.
- acbw.healthData = newHealthData(scs.ConnectivityState)
- acbw.healthMu.Unlock()
-
- acbw.stateListener(scs)
- })
-}
-
-func (acbw *acBalancerWrapper) String() string {
- return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
-}
-
-func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
- acbw.ac.updateAddrs(addrs)
-}
-
-func (acbw *acBalancerWrapper) Connect() {
- go acbw.ac.connect()
-}
-
-func (acbw *acBalancerWrapper) Shutdown() {
- acbw.closeProducers()
- acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
-}
-
-// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
-// ready, blocks until it is or ctx expires. Returns an error when the context
-// expires or the addrConn is shut down.
-func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- transport := acbw.ac.getReadyTransport()
- if transport == nil {
- return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
- }
- return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
-}
-
-// Invoke performs a unary RPC. If the addrConn is not ready, returns
-// errSubConnNotReady.
-func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
- cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
- if err != nil {
- return err
- }
- if err := cs.SendMsg(args); err != nil {
- return err
- }
- return cs.RecvMsg(reply)
-}
-
-type refCountedProducer struct {
- producer balancer.Producer
- refs int // number of current refs to the producer
- close func() // underlying producer's close function
-}
-
-func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
- acbw.producersMu.Lock()
- defer acbw.producersMu.Unlock()
-
- // Look up existing producer from this builder.
- pData := acbw.producers[pb]
- if pData == nil {
- // Not found; create a new one and add it to the producers map.
- p, closeFn := pb.Build(acbw)
- pData = &refCountedProducer{producer: p, close: closeFn}
- acbw.producers[pb] = pData
- }
- // Account for this new reference.
- pData.refs++
-
- // Return a cleanup function wrapped in a OnceFunc to remove this reference
- // and delete the refCountedProducer from the map if the total reference
- // count goes to zero.
- unref := func() {
- acbw.producersMu.Lock()
- // If closeProducers has already closed this producer instance, refs is
- // set to 0, so the check after decrementing will never pass, and the
- // producer will not be double-closed.
- pData.refs--
- if pData.refs == 0 {
- defer pData.close() // Run outside the acbw mutex
- delete(acbw.producers, pb)
- }
- acbw.producersMu.Unlock()
- }
- return pData.producer, sync.OnceFunc(unref)
-}
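The unref closure is a compact ref-counting idiom: increment under the map's lock and wrap the decrement in sync.OnceFunc so a double release can't drive the count negative. A generic sketch with hypothetical resource names:

package main

import (
	"fmt"
	"sync"
)

type entry struct {
	refs  int
	close func()
}

type registry struct {
	mu sync.Mutex
	m  map[string]*entry
}

// acquire returns the shared entry for key, creating it on first use,
// plus a release function that is safe to call more than once.
func (r *registry) acquire(key string) (e *entry, release func()) {
	r.mu.Lock()
	defer r.mu.Unlock()
	e = r.m[key]
	if e == nil {
		e = &entry{close: func() { fmt.Println("closing", key) }}
		r.m[key] = e
	}
	e.refs++
	return e, sync.OnceFunc(func() {
		r.mu.Lock()
		e.refs--
		if e.refs == 0 {
			defer e.close() // run outside the lock, like the wrapper does
			delete(r.m, key)
		}
		r.mu.Unlock()
	})
}

func main() {
	r := &registry{m: map[string]*entry{}}
	_, rel1 := r.acquire("prod")
	_, rel2 := r.acquire("prod")
	rel1()
	rel1() // no-op thanks to OnceFunc
	rel2() // refs hit zero: prints "closing prod"
}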
-
-func (acbw *acBalancerWrapper) closeProducers() {
- acbw.producersMu.Lock()
- defer acbw.producersMu.Unlock()
- for pb, pData := range acbw.producers {
- pData.refs = 0
- pData.close()
- delete(acbw.producers, pb)
- }
-}
-
-// healthProducerRegisterFn is a type alias for the health producer's function
-// for registering listeners.
-type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func()
-
-// healthListenerRegFn returns a function to register a listener for health
-// updates. If client side health checks are disabled, the registered listener
-// will get a single READY (raw connectivity state) update.
-//
-// Client side health checking is enabled when all the following
-// conditions are satisfied:
-// 1. Health checking is not disabled using the dial option.
-// 2. The health package is imported.
-// 3. The health check config is present in the service config.
-func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() {
- if acbw.ccb.cc.dopts.disableHealthCheck {
- return noOpRegisterHealthListenerFn
- }
- regHealthLisFn := internal.RegisterClientHealthCheckListener
- if regHealthLisFn == nil {
- // The health package is not imported.
- return noOpRegisterHealthListenerFn
- }
- cfg := acbw.ac.cc.healthCheckConfig()
- if cfg == nil {
- return noOpRegisterHealthListenerFn
- }
- return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
- return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener)
- }
-}
-
-// RegisterHealthListener accepts a health listener from the LB policy. It sends
-// updates to the health listener as long as the SubConn's connectivity state
-// doesn't change and a new health listener is not registered. To invalidate
-// the currently registered health listener, acbw updates the healthData. If a
-// nil listener is registered, the active health listener is dropped.
-func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
- acbw.healthMu.Lock()
- defer acbw.healthMu.Unlock()
- acbw.healthData.closeHealthProducer()
- // Listeners should not be registered when the connectivity state
- // isn't Ready. This may happen when the balancer registers a listener
- // after the connectivityState is updated, but before it is notified
- // of the update.
- if acbw.healthData.connectivityState != connectivity.Ready {
- return
- }
- // Replace the health data to stop sending updates to any previously
- // registered health listeners.
- hd := newHealthData(connectivity.Ready)
- acbw.healthData = hd
- if listener == nil {
- return
- }
-
- registerFn := acbw.healthListenerRegFn()
- acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
- if ctx.Err() != nil || acbw.ccb.balancer == nil {
- return
- }
- // Don't send updates if a new listener is registered.
- acbw.healthMu.Lock()
- defer acbw.healthMu.Unlock()
- if acbw.healthData != hd {
- return
- }
- // Serialize the health updates from the health producer with
- // other calls into the LB policy.
- listenerWrapper := func(scs balancer.SubConnState) {
- acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
- if ctx.Err() != nil || acbw.ccb.balancer == nil {
- return
- }
- acbw.healthMu.Lock()
- defer acbw.healthMu.Unlock()
- if acbw.healthData != hd {
- return
- }
- listener(scs)
- })
- }
-
- hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
- })
-}
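
RegisterHealthListener invalidates stale callbacks by swapping the healthData pointer and having every asynchronous callback re-check that pointer under the mutex before delivering. The same generation-token technique can be shown in isolation; the sketch below assumes nothing from gRPC — listenerSlot and the string updates are invented for illustration:

```go
package main

import (
	"fmt"
	"sync"
)

// listenerSlot delivers updates to at most one active listener. Registering
// a new listener mints a fresh token; callbacks created under an old token
// compare pointers and drop their update, just like the healthData swap.
type listenerSlot struct {
	mu  sync.Mutex
	gen *int // identity of the current registration
	fn  func(string)
}

// Register installs fn and returns the token a producer must present when
// it later delivers updates.
func (s *listenerSlot) Register(fn func(string)) *int {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.gen = new(int) // a new token invalidates all outstanding callbacks
	s.fn = fn
	return s.gen
}

// Notify is called asynchronously, possibly long after the listener it was
// created for has been replaced.
func (s *listenerSlot) Notify(token *int, update string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if token != s.gen || s.fn == nil {
		return // stale: a newer registration owns the slot
	}
	s.fn(update)
}

func main() {
	var s listenerSlot
	tokA := s.Register(func(u string) { fmt.Println("A:", u) })
	s.Notify(tokA, "ready") // delivered
	s.Register(func(u string) { fmt.Println("B:", u) })
	s.Notify(tokA, "ready") // dropped: tokA is stale
}
```
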
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
deleted file mode 100644
index b2f8fc7f4..000000000
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ /dev/null
@@ -1,1069 +0,0 @@
-// Copyright 2018 The gRPC Authors
-// All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The canonical version of this proto can be found at
-// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.36.4
-// protoc v5.27.1
-// source: grpc/binlog/v1/binarylog.proto
-
-package grpc_binarylog_v1
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
- sync "sync"
- unsafe "unsafe"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Enumerates the type of event.
-// Note that the terminology is different from the RPC semantics
-// definition, but the same meaning is expressed here.
-type GrpcLogEntry_EventType int32
-
-const (
- GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
- // Header sent from client to server
- GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
- // Header sent from server to client
- GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
- // Message sent from client to server
- GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
- // Message sent from server to client
- GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
- // A signal that client is done sending
- GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
- // Trailer indicates the end of the RPC.
- // On client side, this event means a trailer was either received
- // from the network or the gRPC library locally generated a status
- // to inform the application about a failure.
- // On server side, this event means the server application requested
- // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
- // this due to races on server side.
- GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
- // A signal that the RPC is cancelled. On client side, this
- // indicates the client application requests a cancellation.
- // On server side, this indicates that cancellation was detected.
- // Note: This marks the end of the RPC. Events may arrive after
- // this due to races. For example, on client side a trailer
- // may arrive even though the application requested to cancel the RPC.
- GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
-)
-
-// Enum value maps for GrpcLogEntry_EventType.
-var (
- GrpcLogEntry_EventType_name = map[int32]string{
- 0: "EVENT_TYPE_UNKNOWN",
- 1: "EVENT_TYPE_CLIENT_HEADER",
- 2: "EVENT_TYPE_SERVER_HEADER",
- 3: "EVENT_TYPE_CLIENT_MESSAGE",
- 4: "EVENT_TYPE_SERVER_MESSAGE",
- 5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
- 6: "EVENT_TYPE_SERVER_TRAILER",
- 7: "EVENT_TYPE_CANCEL",
- }
- GrpcLogEntry_EventType_value = map[string]int32{
- "EVENT_TYPE_UNKNOWN": 0,
- "EVENT_TYPE_CLIENT_HEADER": 1,
- "EVENT_TYPE_SERVER_HEADER": 2,
- "EVENT_TYPE_CLIENT_MESSAGE": 3,
- "EVENT_TYPE_SERVER_MESSAGE": 4,
- "EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
- "EVENT_TYPE_SERVER_TRAILER": 6,
- "EVENT_TYPE_CANCEL": 7,
- }
-)
-
-func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType {
- p := new(GrpcLogEntry_EventType)
- *p = x
- return p
-}
-
-func (x GrpcLogEntry_EventType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor {
- return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor()
-}
-
-func (GrpcLogEntry_EventType) Type() protoreflect.EnumType {
- return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0]
-}
-
-func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead.
-func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Enumerates the entity that generates the log entry
-type GrpcLogEntry_Logger int32
-
-const (
- GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
- GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1
- GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2
-)
-
-// Enum value maps for GrpcLogEntry_Logger.
-var (
- GrpcLogEntry_Logger_name = map[int32]string{
- 0: "LOGGER_UNKNOWN",
- 1: "LOGGER_CLIENT",
- 2: "LOGGER_SERVER",
- }
- GrpcLogEntry_Logger_value = map[string]int32{
- "LOGGER_UNKNOWN": 0,
- "LOGGER_CLIENT": 1,
- "LOGGER_SERVER": 2,
- }
-)
-
-func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger {
- p := new(GrpcLogEntry_Logger)
- *p = x
- return p
-}
-
-func (x GrpcLogEntry_Logger) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor {
- return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor()
-}
-
-func (GrpcLogEntry_Logger) Type() protoreflect.EnumType {
- return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1]
-}
-
-func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead.
-func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1}
-}
-
-type Address_Type int32
-
-const (
- Address_TYPE_UNKNOWN Address_Type = 0
- // address is in 1.2.3.4 form
- Address_TYPE_IPV4 Address_Type = 1
- // address is in IPv6 canonical form (RFC5952 section 4)
- // The scope is NOT included in the address string.
- Address_TYPE_IPV6 Address_Type = 2
- // address is UDS string
- Address_TYPE_UNIX Address_Type = 3
-)
-
-// Enum value maps for Address_Type.
-var (
- Address_Type_name = map[int32]string{
- 0: "TYPE_UNKNOWN",
- 1: "TYPE_IPV4",
- 2: "TYPE_IPV6",
- 3: "TYPE_UNIX",
- }
- Address_Type_value = map[string]int32{
- "TYPE_UNKNOWN": 0,
- "TYPE_IPV4": 1,
- "TYPE_IPV6": 2,
- "TYPE_UNIX": 3,
- }
-)
-
-func (x Address_Type) Enum() *Address_Type {
- p := new(Address_Type)
- *p = x
- return p
-}
-
-func (x Address_Type) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Address_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor()
-}
-
-func (Address_Type) Type() protoreflect.EnumType {
- return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2]
-}
-
-func (x Address_Type) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Address_Type.Descriptor instead.
-func (Address_Type) EnumDescriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0}
-}
-
-// Log entry we store in binary logs
-type GrpcLogEntry struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The timestamp of the binary log message
- Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Uniquely identifies a call. The value must not be 0 in order to disambiguate
- // from an unset value.
- // Each call may have several log entries; they will all have the same call_id.
- // Nothing is guaranteed about their value other than they are unique across
- // different RPCs in the same gRPC process.
- CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
- // The entry sequence id for this call. The first GrpcLogEntry has a
- // value of 1, to disambiguate from an unset value. The purpose of
- // this field is to detect missing entries in environments where
- // durability or ordering is not guaranteed.
- SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
- Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
- Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum
- // The logger uses one of the following fields to record the payload,
- // according to the type of the log entry.
- //
- // Types that are valid to be assigned to Payload:
- //
- // *GrpcLogEntry_ClientHeader
- // *GrpcLogEntry_ServerHeader
- // *GrpcLogEntry_Message
- // *GrpcLogEntry_Trailer
- Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
- // true if payload does not represent the full message or metadata.
- PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
- // Peer address information; it will only be recorded on the first
- // incoming event. On client side, peer is logged on
- // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
- // the case of trailers-only. On server side, peer is always
- // logged on EVENT_TYPE_CLIENT_HEADER.
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *GrpcLogEntry) Reset() {
- *x = GrpcLogEntry{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *GrpcLogEntry) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GrpcLogEntry) ProtoMessage() {}
-
-func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead.
-func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.Timestamp
- }
- return nil
-}
-
-func (x *GrpcLogEntry) GetCallId() uint64 {
- if x != nil {
- return x.CallId
- }
- return 0
-}
-
-func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
- if x != nil {
- return x.SequenceIdWithinCall
- }
- return 0
-}
-
-func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
- if x != nil {
- return x.Type
- }
- return GrpcLogEntry_EVENT_TYPE_UNKNOWN
-}
-
-func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
- if x != nil {
- return x.Logger
- }
- return GrpcLogEntry_LOGGER_UNKNOWN
-}
-
-func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
- if x != nil {
- return x.Payload
- }
- return nil
-}
-
-func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
- if x != nil {
- if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok {
- return x.ClientHeader
- }
- }
- return nil
-}
-
-func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
- if x != nil {
- if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok {
- return x.ServerHeader
- }
- }
- return nil
-}
-
-func (x *GrpcLogEntry) GetMessage() *Message {
- if x != nil {
- if x, ok := x.Payload.(*GrpcLogEntry_Message); ok {
- return x.Message
- }
- }
- return nil
-}
-
-func (x *GrpcLogEntry) GetTrailer() *Trailer {
- if x != nil {
- if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok {
- return x.Trailer
- }
- }
- return nil
-}
-
-func (x *GrpcLogEntry) GetPayloadTruncated() bool {
- if x != nil {
- return x.PayloadTruncated
- }
- return false
-}
-
-func (x *GrpcLogEntry) GetPeer() *Address {
- if x != nil {
- return x.Peer
- }
- return nil
-}
-
-type isGrpcLogEntry_Payload interface {
- isGrpcLogEntry_Payload()
-}
-
-type GrpcLogEntry_ClientHeader struct {
- ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
-}
-
-type GrpcLogEntry_ServerHeader struct {
- ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
-}
-
-type GrpcLogEntry_Message struct {
- // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
- Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
-}
-
-type GrpcLogEntry_Trailer struct {
- Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
-}
-
-func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
-
-func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
-
-func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
-
-func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
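
The block above is how protoc-gen-go encodes a proto oneof: an unexported single-method interface, one wrapper struct per case, and a marker method on each so that no other type can satisfy the interface. Reading the oneof back is a type switch. A simplified hand-rolled version (value types instead of the generated pointer wrappers):

```go
package main

import "fmt"

// payload is a sealed interface: only types with the marker method qualify.
type payload interface{ isPayload() }

type headerPayload struct{ name string }
type messagePayload struct{ data []byte }

func (headerPayload) isPayload()  {}
func (messagePayload) isPayload() {}

// describe reads the oneof back with a type switch, the same way callers
// switch on the value returned by GrpcLogEntry.GetPayload.
func describe(p payload) string {
	switch v := p.(type) {
	case headerPayload:
		return "header: " + v.name
	case messagePayload:
		return fmt.Sprintf("message: %d bytes", len(v.data))
	case nil:
		return "unset"
	default:
		return "unknown case"
	}
}

func main() {
	fmt.Println(describe(headerPayload{name: "client"}))
	fmt.Println(describe(messagePayload{data: []byte("hi")}))
	fmt.Println(describe(nil))
}
```
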
-
-type ClientHeader struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // The name of the RPC method, which looks something like:
- // /<service>/<method>
- // Note the leading "/" character.
- MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
- // A single process may be used to run multiple virtual
- // servers with different identities.
- // The authority is the name of such a server identity.
- // It is typically a portion of the URI in the form of
- // <host> or <host>:<port>.
- Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
- // the RPC timeout
- Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *ClientHeader) Reset() {
- *x = ClientHeader{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *ClientHeader) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ClientHeader) ProtoMessage() {}
-
-func (x *ClientHeader) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
-func (*ClientHeader) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *ClientHeader) GetMetadata() *Metadata {
- if x != nil {
- return x.Metadata
- }
- return nil
-}
-
-func (x *ClientHeader) GetMethodName() string {
- if x != nil {
- return x.MethodName
- }
- return ""
-}
-
-func (x *ClientHeader) GetAuthority() string {
- if x != nil {
- return x.Authority
- }
- return ""
-}
-
-func (x *ClientHeader) GetTimeout() *durationpb.Duration {
- if x != nil {
- return x.Timeout
- }
- return nil
-}
-
-type ServerHeader struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *ServerHeader) Reset() {
- *x = ServerHeader{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *ServerHeader) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServerHeader) ProtoMessage() {}
-
-func (x *ServerHeader) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
-func (*ServerHeader) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ServerHeader) GetMetadata() *Metadata {
- if x != nil {
- return x.Metadata
- }
- return nil
-}
-
-type Trailer struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // The gRPC status code.
- StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
- // An original status message before any transport specific
- // encoding.
- StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
- // The value of the 'grpc-status-details-bin' metadata key. If
- // present, this is always an encoded 'google.rpc.Status' message.
- StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *Trailer) Reset() {
- *x = Trailer{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Trailer) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Trailer) ProtoMessage() {}
-
-func (x *Trailer) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
-func (*Trailer) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Trailer) GetMetadata() *Metadata {
- if x != nil {
- return x.Metadata
- }
- return nil
-}
-
-func (x *Trailer) GetStatusCode() uint32 {
- if x != nil {
- return x.StatusCode
- }
- return 0
-}
-
-func (x *Trailer) GetStatusMessage() string {
- if x != nil {
- return x.StatusMessage
- }
- return ""
-}
-
-func (x *Trailer) GetStatusDetails() []byte {
- if x != nil {
- return x.StatusDetails
- }
- return nil
-}
-
-// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
-type Message struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Length of the message. It may not be the same as the length of the
- // data field, as the logging payload can be truncated or omitted.
- Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
- // May be truncated or omitted.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *Message) Reset() {
- *x = Message{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Message) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Message) ProtoMessage() {}
-
-func (x *Message) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Message.ProtoReflect.Descriptor instead.
-func (*Message) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *Message) GetLength() uint32 {
- if x != nil {
- return x.Length
- }
- return 0
-}
-
-func (x *Message) GetData() []byte {
- if x != nil {
- return x.Data
- }
- return nil
-}
-
-// A list of metadata pairs, used in the payload of client header,
-// server header, and server trailer.
-// Implementations may omit some entries to honor the header limits
-// of GRPC_BINARY_LOG_CONFIG.
-//
-// Header keys added by gRPC are omitted. To be more specific,
-// implementations will not log the following entries, and this is
-// not to be treated as a truncation:
-// - entries handled by grpc that are not user visible, such as those
-// that begin with 'grpc-' (with the exception of grpc-trace-bin)
-// or keys like 'lb-token'
-// - transport specific entries, including but not limited to:
-// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
-// - entries added for call credentials
-//
-// Implementations must always log grpc-trace-bin if it is present.
-// Practically speaking it will only be visible on server side because
-// grpc-trace-bin is managed by low level client side mechanisms
-// inaccessible from the application level. On server side, the
-// header is just a normal metadata key.
-// The pair will not count towards the size limit.
-type Metadata struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *Metadata) Reset() {
- *x = Metadata{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Metadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Metadata) ProtoMessage() {}
-
-func (x *Metadata) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
-func (*Metadata) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *Metadata) GetEntry() []*MetadataEntry {
- if x != nil {
- return x.Entry
- }
- return nil
-}
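
The doc comment on Metadata above amounts to a filtering policy for which header keys a binary logger records. A hedged sketch of such a filter follows; the key sets are only the examples named in the comment ("including but not limited to"), not gRPC's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// loggable reports whether a metadata key belongs in a binary log entry,
// following the omission rules in the Metadata doc comment: always keep
// grpc-trace-bin, drop other "grpc-"-prefixed keys and 'lb-token', and drop
// transport-level headers.
func loggable(key string) bool {
	key = strings.ToLower(key)
	if key == "grpc-trace-bin" {
		return true // must always be logged when present
	}
	if strings.HasPrefix(key, "grpc-") || key == "lb-token" {
		return false // handled by grpc, not user visible
	}
	switch key {
	case ":path", ":authority", "content-encoding", "user-agent", "te":
		return false // transport-specific entries
	}
	return true
}

func main() {
	for _, k := range []string{"x-request-id", "grpc-timeout", "grpc-trace-bin", ":path"} {
		fmt.Printf("%-16s %v\n", k, loggable(k))
	}
}
```
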
-
-// A metadata key value pair
-type MetadataEntry struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *MetadataEntry) Reset() {
- *x = MetadataEntry{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *MetadataEntry) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MetadataEntry) ProtoMessage() {}
-
-func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
-func (*MetadataEntry) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *MetadataEntry) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-
-func (x *MetadataEntry) GetValue() []byte {
- if x != nil {
- return x.Value
- }
- return nil
-}
-
-// Address information
-type Address struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
- Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
- // only for TYPE_IPV4 and TYPE_IPV6
- IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *Address) Reset() {
- *x = Address{}
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Address) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Address) ProtoMessage() {}
-
-func (x *Address) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Address.ProtoReflect.Descriptor instead.
-func (*Address) Descriptor() ([]byte, []int) {
- return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *Address) GetType() Address_Type {
- if x != nil {
- return x.Type
- }
- return Address_TYPE_UNKNOWN
-}
-
-func (x *Address) GetAddress() string {
- if x != nil {
- return x.Address
- }
- return ""
-}
-
-func (x *Address) GetIpPort() uint32 {
- if x != nil {
- return x.IpPort
- }
- return 0
-}
-
-var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
-
-var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{
- 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
- 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
- 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
- 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
- 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
- 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
- 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
- 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
- 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
- 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
- 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
- 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
- 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
- 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
- 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
- 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
- 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
- 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
- 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
- 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
- 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
- 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
- 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
- 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
- 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
- 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
- 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
- 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
- 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
- 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
- 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
- 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
- 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
- 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
- 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
- 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
- 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
- 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
- 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
- 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
- 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
- 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
- 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
- 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
- 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
- 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
- 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
- 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
- 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
- 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
- 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
- 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
- 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
- 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
- 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
- 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
- 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
- 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
- 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
- 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
- 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
- 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
- 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
- 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
- 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
- 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
- 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
- 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
- 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
- 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-})
-
-var (
- file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
- file_grpc_binlog_v1_binarylog_proto_rawDescData []byte
-)
-
-func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
- file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
- file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)))
- })
- return file_grpc_binlog_v1_binarylog_proto_rawDescData
-}
-
-var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
- (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
- (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
- (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
- (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry
- (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader
- (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader
- (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer
- (*Message)(nil), // 7: grpc.binarylog.v1.Message
- (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata
- (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry
- (*Address)(nil), // 10: grpc.binarylog.v1.Address
- (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
- (*durationpb.Duration)(nil), // 12: google.protobuf.Duration
-}
-var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{
- 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp
- 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType
- 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger
- 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader
- 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader
- 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message
- 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer
- 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address
- 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
- 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration
- 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
- 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata
- 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry
- 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type
- 14, // [14:14] is the sub-list for method output_type
- 14, // [14:14] is the sub-list for method input_type
- 14, // [14:14] is the sub-list for extension type_name
- 14, // [14:14] is the sub-list for extension extendee
- 0, // [0:14] is the sub-list for field type_name
-}
-
-func init() { file_grpc_binlog_v1_binarylog_proto_init() }
-func file_grpc_binlog_v1_binarylog_proto_init() {
- if File_grpc_binlog_v1_binarylog_proto != nil {
- return
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
- (*GrpcLogEntry_ClientHeader)(nil),
- (*GrpcLogEntry_ServerHeader)(nil),
- (*GrpcLogEntry_Message)(nil),
- (*GrpcLogEntry_Trailer)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)),
- NumEnums: 3,
- NumMessages: 8,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes,
- DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs,
- EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes,
- MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes,
- }.Build()
- File_grpc_binlog_v1_binarylog_proto = out.File
- file_grpc_binlog_v1_binarylog_proto_goTypes = nil
- file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
-}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
deleted file mode 100644
index 788c89c16..000000000
--- a/vendor/google.golang.org/grpc/call.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
-)
-
-// Invoke sends the RPC request on the wire and returns after the response is
-// received. This is typically called by generated code.
-//
-// All errors returned by Invoke are compatible with the status package.
-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
- // allow interceptor to see all applicable call options, which means those
- // configured as defaults from dial option as well as per-call options
- opts = combine(cc.dopts.callOptions, opts)
-
- if cc.dopts.unaryInt != nil {
- return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
- }
- return invoke(ctx, method, args, reply, cc, opts...)
-}
-
-func combine(o1 []CallOption, o2 []CallOption) []CallOption {
- // We don't use append because o1 could have extra capacity whose
- // elements would be overwritten, which could cause inadvertent
- // sharing (and race conditions) between concurrent calls.
- if len(o1) == 0 {
- return o2
- } else if len(o2) == 0 {
- return o1
- }
- ret := make([]CallOption, len(o1)+len(o2))
- copy(ret, o1)
- copy(ret[len(o1):], o2)
- return ret
-}
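
The comment in combine is worth unpacking: append reuses o1's backing array when it has spare capacity, so two concurrent calls appending different per-call options onto the same shared defaults slice could race on the same array elements. A small single-goroutine demonstration of the aliasing (no gRPC types involved):

```go
package main

import "fmt"

func main() {
	backing := make([]int, 2, 4) // length 2, spare capacity 2
	backing[0], backing[1] = 1, 2
	a := backing[:2] // a aliases backing's array

	// append has room, so it writes 9 into the shared array in place...
	b := append(a, 9)

	// ...and a second append through the same alias overwrites that slot.
	c := append(a, 7)
	fmt.Println(b[2], c[2]) // 7 7 — b's element was silently clobbered

	// The combine-style fix: copy into a freshly allocated slice.
	safe := make([]int, len(a), len(a)+1)
	copy(safe, a)
	safe = append(safe, 9)
	fmt.Println(safe) // [1 2 9]
}
```
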
-
-// Invoke sends the RPC request on the wire and returns after the response is
-// received. This is typically called by generated code.
-//
-// Deprecated: Use ClientConn.Invoke instead.
-func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
- return cc.Invoke(ctx, method, args, reply, opts...)
-}
-
-var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
-
-func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
- cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
- if err != nil {
- return err
- }
- if err := cs.SendMsg(req); err != nil {
- return err
- }
- return cs.RecvMsg(reply)
-}
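
invoke makes the relationship explicit: a unary RPC is a client stream with exactly one SendMsg and one RecvMsg. Generated stubs call ClientConn.Invoke the same way; a hedged sketch of doing it by hand, where the method path and the pingReq/pingResp types are invented for illustration (real ones must be protobuf-generated for the default codec to marshal them):

```go
package echoclient

import (
	"context"

	"google.golang.org/grpc"
)

// pingReq and pingResp stand in for protoc-generated message types; the
// names and the "/echo.Echo/Ping" method path are hypothetical.
type pingReq struct{ Msg string }
type pingResp struct{ Msg string }

func ping(ctx context.Context, cc *grpc.ClientConn) (*pingResp, error) {
	resp := new(pingResp)
	// Invoke runs the unary call as a one-shot stream: marshal and send the
	// request, then block receiving the single reply into resp.
	if err := cc.Invoke(ctx, "/echo.Echo/Ping", &pingReq{Msg: "hello"}, resp); err != nil {
		return nil, err // always compatible with the status package
	}
	return resp, nil
}
```
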
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
deleted file mode 100644
index 32b7fa579..000000000
--- a/vendor/google.golang.org/grpc/channelz/channelz.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package channelz exports internals of the channelz implementation as required
-// by other gRPC packages.
-//
-// The implementation of the channelz spec as defined in
-// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
-// the `internal/channelz` package.
-//
-// # Experimental
-//
-// Notice: All APIs in this package are experimental and may be removed in a
-// later release.
-package channelz
-
-import "google.golang.org/grpc/internal/channelz"
-
-// Identifier is an opaque identifier which uniquely identifies an entity in the
-// channelz database.
-type Identifier = channelz.Identifier
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
deleted file mode 100644
index a319ef979..000000000
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ /dev/null
@@ -1,1831 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "errors"
- "fmt"
- "math"
- "net/url"
- "slices"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/base"
- "google.golang.org/grpc/balancer/pickfirst"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/idle"
- iresolver "google.golang.org/grpc/internal/resolver"
- "google.golang.org/grpc/internal/stats"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
- "google.golang.org/grpc/status"
-
- _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
- _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
- _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
- _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
-)
-
-const (
- // minimum time to give a connection to complete
- minConnectTimeout = 20 * time.Second
-)
-
-var (
- // ErrClientConnClosing indicates that the operation is illegal because
- // the ClientConn is closing.
- //
- // Deprecated: this error should not be relied upon by users; use the status
- // code of Canceled instead.
- ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
- // errConnDrain indicates that the connection is starting to be drained and no longer accepts new RPCs.
- errConnDrain = errors.New("grpc: the connection is drained")
- // errConnClosing indicates that the connection is closing.
- errConnClosing = errors.New("grpc: the connection is closing")
- // errConnIdling indicates the connection is being closed as the channel
- // is moving to an idle mode due to inactivity.
- errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
- // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
- // service config.
- invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
- // PickFirstBalancerName is the name of the pick_first balancer.
- PickFirstBalancerName = pickfirst.Name
-)
-
-// The following errors are returned from Dial and DialContext
-var (
- // errNoTransportSecurity indicates that no transport security has been
- // set for the ClientConn. Users should either set one or explicitly
- // disable security with insecure credentials.
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
- // errTransportCredsAndBundle indicates that creds bundle is used together
- // with other individual Transport Credentials.
- errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
- // errNoTransportCredsInBundle indicates that the configured creds bundle
- // returned nil transport credentials.
- errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
- // errTransportCredentialsMissing indicates that the user attempted to
- // transmit security information (e.g., an OAuth2 token), which requires
- // a secure connection, over an insecure connection.
- errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
-)
-
-const (
- defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
- defaultClientMaxSendMessageSize = math.MaxInt32
- // defaultWriteBufSize and defaultReadBufSize specify the default buffer
- // sizes for writing and reading frames on the wire.
- defaultWriteBufSize = 32 * 1024
- defaultReadBufSize = 32 * 1024
-)
-
-type defaultConfigSelector struct {
- sc *ServiceConfig
-}
-
-func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
- return &iresolver.RPCConfig{
- Context: rpcInfo.Context,
- MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
- }, nil
-}
-
-// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
-// is performed. Use of the ClientConn for RPCs will automatically cause it to
-// connect. The Connect method may be called to manually create a connection,
-// but for most users this should be unnecessary.
-//
-// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns
-// name resolver, a "dns:///" prefix may be applied to the target. The default
-// name resolver will be used if no scheme is detected, or if the parsed scheme
-// is not a registered name resolver. The default resolver is "dns" but can be
-// overridden using the resolver package's SetDefaultScheme.
-//
-// Examples:
-//
-// - "foo.googleapis.com:8080"
-// - "dns:///foo.googleapis.com:8080"
-// - "dns:///foo.googleapis.com"
-// - "dns:///10.0.0.213:8080"
-// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443"
-// - "dns://8.8.8.8/foo.googleapis.com:8080"
-// - "dns://8.8.8.8/foo.googleapis.com"
-// - "zookeeper://zk.example.com:9900/example_service"
-//
-// The DialOptions returned by WithBlock, WithTimeout,
-// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
-// function.
-func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
- cc := &ClientConn{
- target: target,
- conns: make(map[*addrConn]struct{}),
- dopts: defaultDialOptions(),
- }
-
- cc.retryThrottler.Store((*retryThrottler)(nil))
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
- cc.ctx, cc.cancel = context.WithCancel(context.Background())
-
- // Apply dial options.
- disableGlobalOpts := false
- for _, opt := range opts {
- if _, ok := opt.(*disableGlobalDialOptions); ok {
- disableGlobalOpts = true
- break
- }
- }
-
- if !disableGlobalOpts {
- for _, opt := range globalDialOptions {
- opt.apply(&cc.dopts)
- }
- }
-
- for _, opt := range opts {
- opt.apply(&cc.dopts)
- }
-
- // Determine the resolver to use.
- if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
- return nil, err
- }
-
- for _, opt := range globalPerTargetDialOptions {
- opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
- }
-
- chainUnaryClientInterceptors(cc)
- chainStreamClientInterceptors(cc)
-
- if err := cc.validateTransportCredentials(); err != nil {
- return nil, err
- }
-
- if cc.dopts.defaultServiceConfigRawJSON != nil {
- scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
- if scpr.Err != nil {
- return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
- }
- cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
- }
- cc.keepaliveParams = cc.dopts.copts.KeepaliveParams
-
- if err = cc.initAuthority(); err != nil {
- return nil, err
- }
-
- // Register ClientConn with channelz. Note that this is done only at a
- // point after which channel creation can no longer fail.
- cc.channelzRegistration(target)
- channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
- channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
-
- cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
- cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
-
- cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
-
- cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
- cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
-
- return cc, nil
-}
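-
-// A minimal usage sketch for NewClient (illustrative; the target and the use
-// of insecure credentials are assumptions, not taken from this file):
-//
-//	conn, err := grpc.NewClient("dns:///foo.googleapis.com:8080",
-//		grpc.WithTransportCredentials(insecure.NewCredentials()))
-//	if err != nil {
-//		log.Fatalf("grpc.NewClient: %v", err)
-//	}
-//	defer conn.Close()
-//
-// RPCs on the returned conn trigger connection establishment automatically;
-// Connect is only needed when an eager connection is desired.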
-
-// Dial calls DialContext(context.Background(), target, opts...).
-//
-// Deprecated: use NewClient instead. Will be supported throughout 1.x.
-func Dial(target string, opts ...DialOption) (*ClientConn, error) {
- return DialContext(context.Background(), target, opts...)
-}
-
-// DialContext calls NewClient and then exits idle mode. If the WithBlock
-// option is used, it calls Connect and WaitForStateChange until either the
-// context expires or the state of the ClientConn is Ready.
-//
-// One subtle difference between NewClient and Dial/DialContext is that the
-// former uses "dns" as the default name resolver, while the latter two use
-// "passthrough" for backward compatibility. This distinction should not matter
-// to most users, but could matter to legacy users that specify a custom dialer
-// and expect it to receive the target string directly.
-//
-// Deprecated: use NewClient instead. Will be supported throughout 1.x.
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
-	// At the end of this method, we kick the channel out of idle, rather than
-	// waiting for the first RPC.
-	//
-	// The WithLocalDNSResolution dial option preserves the legacy behavior of
-	// `grpc.Dial`: when the default "passthrough" scheme is used, hostname
-	// resolution is skipped; when "dns" is used, resolution is performed on
-	// the client.
- opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...)
- cc, err := NewClient(target, opts...)
- if err != nil {
- return nil, err
- }
-
- // We start the channel off in idle mode, but kick it out of idle now,
- // instead of waiting for the first RPC. This is the legacy behavior of
- // Dial.
- defer func() {
- if err != nil {
- cc.Close()
- }
- }()
-
- // This creates the name resolver, load balancer, etc.
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
- return nil, err
- }
-
- // Return now for non-blocking dials.
- if !cc.dopts.block {
- return cc, nil
- }
-
- if cc.dopts.timeout > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
- defer cancel()
- }
- defer func() {
- select {
- case <-ctx.Done():
- switch {
- case ctx.Err() == err:
- conn = nil
- case err == nil || !cc.dopts.returnLastError:
- conn, err = nil, ctx.Err()
- default:
- conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
- }
- default:
- }
- }()
-
- // A blocking dial blocks until the clientConn is ready.
- for {
- s := cc.GetState()
- if s == connectivity.Idle {
- cc.Connect()
- }
- if s == connectivity.Ready {
- return cc, nil
- } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
- if err = cc.connectionError(); err != nil {
- terr, ok := err.(interface {
- Temporary() bool
- })
- if ok && !terr.Temporary() {
- return nil, err
- }
- }
- }
- if !cc.WaitForStateChange(ctx, s) {
- // ctx got timeout or canceled.
- if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
- return nil, err
- }
- return nil, ctx.Err()
- }
- }
-}
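-
-// A migration sketch (illustrative; the target and credentials are assumed):
-// Dial's legacy behavior can be approximated with NewClient by pinning the
-// "passthrough" scheme and exiting idle mode eagerly.
-//
-//	conn, err := grpc.NewClient("passthrough:///10.0.0.1:8080",
-//		grpc.WithTransportCredentials(insecure.NewCredentials()))
-//	if err != nil {
-//		log.Fatalf("grpc.NewClient: %v", err)
-//	}
-//	conn.Connect() // connect eagerly, as Dial did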
-
-// addTraceEvent is a helper method to add a trace event on the channel. If the
-// channel is a nested one, the same event is also added on the parent channel.
-func (cc *ClientConn) addTraceEvent(msg string) {
- ted := &channelz.TraceEvent{
- Desc: fmt.Sprintf("Channel %s", msg),
- Severity: channelz.CtInfo,
- }
- if cc.dopts.channelzParent != nil {
- ted.Parent = &channelz.TraceEvent{
- Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
- Severity: channelz.CtInfo,
- }
- }
- channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
-}
-
-type idler ClientConn
-
-func (i *idler) EnterIdleMode() {
- (*ClientConn)(i).enterIdleMode()
-}
-
-func (i *idler) ExitIdleMode() error {
- return (*ClientConn)(i).exitIdleMode()
-}
-
-// exitIdleMode moves the channel out of idle mode by recreating the name
-// resolver and load balancer. This should never be called directly; use
-// cc.idlenessMgr.ExitIdleMode instead.
-func (cc *ClientConn) exitIdleMode() (err error) {
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return errConnClosing
- }
- cc.mu.Unlock()
-
- // This needs to be called without cc.mu because this builds a new resolver
- // which might update state or report error inline, which would then need to
- // acquire cc.mu.
- if err := cc.resolverWrapper.start(); err != nil {
- return err
- }
-
- cc.addTraceEvent("exiting idle mode")
- return nil
-}
-
-// initIdleStateLocked initializes common state to how it should be while idle.
-func (cc *ClientConn) initIdleStateLocked() {
- cc.resolverWrapper = newCCResolverWrapper(cc)
- cc.balancerWrapper = newCCBalancerWrapper(cc)
- cc.firstResolveEvent = grpcsync.NewEvent()
- // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
- // of setting it to nil here, we recreate the map. This also means that we
- // don't have to do this when exiting idle mode.
- cc.conns = make(map[*addrConn]struct{})
-}
-
-// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
-// name resolver, load balancer, and any subchannels. This should never be
-// called directly; use cc.idlenessMgr.EnterIdleMode instead.
-func (cc *ClientConn) enterIdleMode() {
- cc.mu.Lock()
-
- if cc.conns == nil {
- cc.mu.Unlock()
- return
- }
-
- conns := cc.conns
-
- rWrapper := cc.resolverWrapper
- rWrapper.close()
- cc.pickerWrapper.reset()
- bWrapper := cc.balancerWrapper
- bWrapper.close()
- cc.csMgr.updateState(connectivity.Idle)
- cc.addTraceEvent("entering idle mode")
-
- cc.initIdleStateLocked()
-
- cc.mu.Unlock()
-
- // Block until the name resolver and LB policy are closed.
- <-rWrapper.serializer.Done()
- <-bWrapper.serializer.Done()
-
- // Close all subchannels after the LB policy is closed.
- for ac := range conns {
- ac.tearDown(errConnIdling)
- }
-}
-
-// validateTransportCredentials performs a series of checks on the configured
-// transport credentials. It returns a non-nil error if any of these conditions
-// are met:
-// - no transport creds and no creds bundle is configured
-// - both transport creds and creds bundle are configured
-//   - creds bundle is configured, but it lacks transport credentials
-// - insecure transport creds configured alongside call creds that require
-// transport level security
-//
-// If none of the above conditions are met, the configured credentials are
-// deemed valid and a nil error is returned.
-func (cc *ClientConn) validateTransportCredentials() error {
- if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
- return errNoTransportSecurity
- }
- if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
- return errTransportCredsAndBundle
- }
- if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
- return errNoTransportCredsInBundle
- }
- transportCreds := cc.dopts.copts.TransportCredentials
- if transportCreds == nil {
- transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
- }
- if transportCreds.Info().SecurityProtocol == "insecure" {
- for _, cd := range cc.dopts.copts.PerRPCCredentials {
- if cd.RequireTransportSecurity() {
- return errTransportCredentialsMissing
- }
- }
- }
- return nil
-}
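-
-// A behavior sketch (illustrative): exactly one source of transport security
-// must be configured, or channel creation fails before any I/O is performed.
-//
-//	_, err := grpc.NewClient("dns:///example.com:443")
-//	// err is non-nil: no transport credentials and no creds bundle.
-//
-//	conn, err := grpc.NewClient("dns:///example.com:443",
-//		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})))
-//	// TLS credentials configured exactly once; err is nil here.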
-
-// channelzRegistration registers the newly created ClientConn with channelz and
-// stores the returned identifier in `cc.channelz`. A channelz trace event is
-// emitted for ClientConn creation. If the newly created ClientConn is a nested
-// one, i.e., a valid parent ClientConn ID is specified via a dial option, the
-// trace event is also added to the parent.
-//
-// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) channelzRegistration(target string) {
- parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
- cc.channelz = channelz.RegisterChannel(parentChannel, target)
- cc.addTraceEvent("created")
-}
-
-// chainUnaryClientInterceptors chains all unary client interceptors into one.
-func chainUnaryClientInterceptors(cc *ClientConn) {
- interceptors := cc.dopts.chainUnaryInts
- // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
- // be executed before any other chained interceptors.
- if cc.dopts.unaryInt != nil {
- interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
- }
- var chainedInt UnaryClientInterceptor
- if len(interceptors) == 0 {
- chainedInt = nil
- } else if len(interceptors) == 1 {
- chainedInt = interceptors[0]
- } else {
- chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
- return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
- }
- }
- cc.dopts.unaryInt = chainedInt
-}
-
-// getChainUnaryInvoker recursively generates the chained unary invoker.
-func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
- if curr == len(interceptors)-1 {
- return finalInvoker
- }
- return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
- return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
- }
-}
-
-// chainStreamClientInterceptors chains all stream client interceptors into one.
-func chainStreamClientInterceptors(cc *ClientConn) {
- interceptors := cc.dopts.chainStreamInts
- // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
- // be executed before any other chained interceptors.
- if cc.dopts.streamInt != nil {
- interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
- }
- var chainedInt StreamClientInterceptor
- if len(interceptors) == 0 {
- chainedInt = nil
- } else if len(interceptors) == 1 {
- chainedInt = interceptors[0]
- } else {
- chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
- return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
- }
- }
- cc.dopts.streamInt = chainedInt
-}
-
-// getChainStreamer recursively generates the chained client stream constructor.
-func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
- if curr == len(interceptors)-1 {
- return finalStreamer
- }
- return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
- return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
- }
-}
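-
-// An ordering sketch (illustrative; logA, logB, and logC are assumed
-// interceptors): combining WithUnaryInterceptor and WithChainUnaryInterceptor
-// runs logA, then logB, then logC on every RPC, because the standalone
-// interceptor is prepended to the chain.
-//
-//	logA := func(ctx context.Context, method string, req, reply any,
-//		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-//		log.Printf("A: %s", method)
-//		return invoker(ctx, method, req, reply, cc, opts...)
-//	}
-//	conn, err := grpc.NewClient(target,
-//		grpc.WithUnaryInterceptor(logA),
-//		grpc.WithChainUnaryInterceptor(logB, logC))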
-
-// newConnectivityStateManager creates a connectivityStateManager with
-// the specified channel.
-func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
- return &connectivityStateManager{
- channelz: channel,
- pubSub: grpcsync.NewPubSub(ctx),
- }
-}
-
-// connectivityStateManager keeps the connectivity.State of ClientConn.
-// This struct will eventually be exported so the balancers can access it.
-//
-// TODO: If possible, get rid of the `connectivityStateManager` type, and
-// provide this functionality using the `PubSub`, to avoid keeping track of
-// the connectivity state at two places.
-type connectivityStateManager struct {
- mu sync.Mutex
- state connectivity.State
- notifyChan chan struct{}
- channelz *channelz.Channel
- pubSub *grpcsync.PubSub
-}
-
-// updateState updates the connectivity.State of ClientConn.
-// If the state changes, it notifies any goroutines waiting for a state
-// change.
-func (csm *connectivityStateManager) updateState(state connectivity.State) {
- csm.mu.Lock()
- defer csm.mu.Unlock()
- if csm.state == connectivity.Shutdown {
- return
- }
- if csm.state == state {
- return
- }
- csm.state = state
- csm.channelz.ChannelMetrics.State.Store(&state)
- csm.pubSub.Publish(state)
-
- channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state)
- if csm.notifyChan != nil {
- // There are other goroutines waiting on this channel.
- close(csm.notifyChan)
- csm.notifyChan = nil
- }
-}
-
-func (csm *connectivityStateManager) getState() connectivity.State {
- csm.mu.Lock()
- defer csm.mu.Unlock()
- return csm.state
-}
-
-func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
- csm.mu.Lock()
- defer csm.mu.Unlock()
- if csm.notifyChan == nil {
- csm.notifyChan = make(chan struct{})
- }
- return csm.notifyChan
-}
-
-// ClientConnInterface defines the functions clients need to perform unary and
-// streaming RPCs. It is implemented by *ClientConn, and is only intended to
-// be referenced by generated code.
-type ClientConnInterface interface {
- // Invoke performs a unary RPC and returns after the response is received
- // into reply.
- Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error
- // NewStream begins a streaming RPC.
- NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
-}
-
-// Assert *ClientConn implements ClientConnInterface.
-var _ ClientConnInterface = (*ClientConn)(nil)
-
-// ClientConn represents a virtual connection to a conceptual endpoint, to
-// perform RPCs.
-//
-// A ClientConn is free to have zero or more actual connections to the endpoint
-// based on configuration, load, etc. It is also free to determine which actual
-// endpoints to use and may change it every RPC, permitting client-side load
-// balancing.
-//
-// A ClientConn encapsulates a range of functionality including name
-// resolution, TCP connection establishment (with retries and backoff) and TLS
-// handshakes. It also handles errors on established connections by
-// re-resolving the name and reconnecting.
-type ClientConn struct {
- ctx context.Context // Initialized using the background context at dial time.
- cancel context.CancelFunc // Cancelled on close.
-
- // The following are initialized at dial time, and are read-only after that.
- target string // User's dial target.
- parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
- authority string // See initAuthority().
- dopts dialOptions // Default and user specified dial options.
- channelz *channelz.Channel // Channelz object.
- resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
- idlenessMgr *idle.Manager
- metricsRecorderList *stats.MetricsRecorderList
-
- // The following provide their own synchronization, and therefore don't
- // require cc.mu to be held to access them.
- csMgr *connectivityStateManager
- pickerWrapper *pickerWrapper
- safeConfigSelector iresolver.SafeConfigSelector
- retryThrottler atomic.Value // Updated from service config.
-
- // mu protects the following fields.
- // TODO: split mu so the same mutex isn't used for everything.
- mu sync.RWMutex
- resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close.
- balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
- sc *ServiceConfig // Latest service config received from the resolver.
- conns map[*addrConn]struct{} // Set to nil on close.
- keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway.
- // firstResolveEvent is used to track whether the name resolver sent us at
- // least one update. RPCs block on this event. May be accessed without mu
- // if we know we cannot be asked to enter idle mode while accessing it (e.g.
- // when the idle manager has already been closed, or if we are already
- // entering idle mode).
- firstResolveEvent *grpcsync.Event
-
- lceMu sync.Mutex // protects lastConnectionError
- lastConnectionError error
-}
-
-// WaitForStateChange waits until the connectivity.State of the ClientConn
-// changes from sourceState or ctx expires. It returns true in the former case
-// and false in the latter.
-func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
- ch := cc.csMgr.getNotifyChan()
- if cc.csMgr.getState() != sourceState {
- return true
- }
- select {
- case <-ctx.Done():
- return false
- case <-ch:
- return true
- }
-}
-
-// GetState returns the connectivity.State of ClientConn.
-func (cc *ClientConn) GetState() connectivity.State {
- return cc.csMgr.getState()
-}
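-
-// A usage sketch (illustrative): callers typically combine GetState,
-// Connect, and WaitForStateChange to block until the channel is Ready.
-//
-//	for state := conn.GetState(); state != connectivity.Ready; state = conn.GetState() {
-//		if state == connectivity.Idle {
-//			conn.Connect()
-//		}
-//		if !conn.WaitForStateChange(ctx, state) {
-//			break // ctx expired or was canceled
-//		}
-//	}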
-
-// Connect causes all subchannels in the ClientConn to attempt to connect if
-// the channel is idle. Does not wait for the connection attempts to begin
-// before returning.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
-// release.
-func (cc *ClientConn) Connect() {
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
- cc.addTraceEvent(err.Error())
- return
- }
- // If the ClientConn was not in idle mode, we need to call ExitIdle on the
- // LB policy so that connections can be created.
- cc.mu.Lock()
- cc.balancerWrapper.exitIdle()
- cc.mu.Unlock()
-}
-
-// waitForResolvedAddrs blocks until the resolver has provided addresses or the
-// context expires. Returns nil unless the context expires first; otherwise
-// returns a status error based on the context.
-func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
- // This is on the RPC path, so we use a fast path to avoid the
- // more-expensive "select" below after the resolver has returned once.
- if cc.firstResolveEvent.HasFired() {
- return nil
- }
- select {
- case <-cc.firstResolveEvent.Done():
- return nil
- case <-ctx.Done():
- return status.FromContextError(ctx.Err()).Err()
- case <-cc.ctx.Done():
- return ErrClientConnClosing
- }
-}
-
-var emptyServiceConfig *ServiceConfig
-
-func init() {
- cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
- if cfg.Err != nil {
- panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
- }
- emptyServiceConfig = cfg.Config.(*ServiceConfig)
-
- internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
- return cc.csMgr.pubSub.Subscribe(s)
- }
- internal.EnterIdleModeForTesting = func(cc *ClientConn) {
- cc.idlenessMgr.EnterIdleModeForTesting()
- }
- internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
- return cc.idlenessMgr.ExitIdleMode()
- }
-}
-
-func (cc *ClientConn) maybeApplyDefaultServiceConfig() {
- if cc.sc != nil {
- cc.applyServiceConfigAndBalancer(cc.sc, nil)
- return
- }
- if cc.dopts.defaultServiceConfig != nil {
- cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig})
- } else {
- cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig})
- }
-}
-
-func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
- defer cc.firstResolveEvent.Fire()
- // Check if the ClientConn is already closed. Some fields (e.g.
- // balancerWrapper) are set to nil when closing the ClientConn, and could
- // cause nil pointer panic if we don't have this check.
- if cc.conns == nil {
- cc.mu.Unlock()
- return nil
- }
-
- if err != nil {
- // May need to apply the initial service config in case the resolver
- // doesn't support service configs, or doesn't provide a service config
- // with the new addresses.
- cc.maybeApplyDefaultServiceConfig()
-
- cc.balancerWrapper.resolverError(err)
-
- // No addresses are valid with err set; return early.
- cc.mu.Unlock()
- return balancer.ErrBadResolverState
- }
-
- var ret error
- if cc.dopts.disableServiceConfig {
- channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
- cc.maybeApplyDefaultServiceConfig()
- } else if s.ServiceConfig == nil {
- cc.maybeApplyDefaultServiceConfig()
- // TODO: do we need to apply a failing LB policy if there is no
- // default, per the error handling design?
- } else {
- if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
- configSelector := iresolver.GetConfigSelector(s)
- if configSelector != nil {
- if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
- channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector")
- }
- } else {
- configSelector = &defaultConfigSelector{sc}
- }
- cc.applyServiceConfigAndBalancer(sc, configSelector)
- } else {
- ret = balancer.ErrBadResolverState
- if cc.sc == nil {
- // Apply the failing LB only if we haven't received valid service config
- // from the name resolver in the past.
- cc.applyFailingLBLocked(s.ServiceConfig)
- cc.mu.Unlock()
- return ret
- }
- }
- }
-
- balCfg := cc.sc.lbConfig
- bw := cc.balancerWrapper
- cc.mu.Unlock()
-
- uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
- if ret == nil {
-		ret = uccsErr // prefer ErrBadResolverState since any other error is
-		// currently meaningless to the caller.
- }
- return ret
-}
-
-// applyFailingLBLocked is akin to configuring an LB policy on the channel which
-// always fails RPCs. Here, an actual LB policy is not configured, but an always
-// erroring picker is configured, which returns errors with information about
-// what was invalid in the received service config. A config selector with no
-// service config is configured, and the connectivity state of the channel is
-// set to TransientFailure.
-func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
- var err error
- if sc.Err != nil {
- err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
- } else {
- err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
- }
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
- cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
- cc.csMgr.updateState(connectivity.TransientFailure)
-}
-
-// copyAddresses makes a copy of the input addresses slice. Addresses are
-// passed during subconn creation and address update operations.
-func copyAddresses(in []resolver.Address) []resolver.Address {
- out := make([]resolver.Address, len(in))
- copy(out, in)
- return out
-}
-
-// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
-//
-// Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
- if cc.conns == nil {
- return nil, ErrClientConnClosing
- }
-
- ac := &addrConn{
- state: connectivity.Idle,
- cc: cc,
- addrs: copyAddresses(addrs),
- scopts: opts,
- dopts: cc.dopts,
- channelz: channelz.RegisterSubChannel(cc.channelz, ""),
- resetBackoff: make(chan struct{}),
- }
- ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
- // Start with our address set to the first address; this may be updated if
- // we connect to different addresses.
- ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
-
- channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
- Desc: "Subchannel created",
- Severity: channelz.CtInfo,
- Parent: &channelz.TraceEvent{
- Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
- Severity: channelz.CtInfo,
- },
- })
-
- // Track ac in cc. This needs to be done before any getTransport(...) is called.
- cc.conns[ac] = struct{}{}
- return ac, nil
-}
-
-// removeAddrConn removes the addrConn in the subConn from clientConn.
-// It also tears down the ac with the given error.
-func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return
- }
- delete(cc.conns, ac)
- cc.mu.Unlock()
- ac.tearDown(err)
-}
-
-// Target returns the target string of the ClientConn.
-func (cc *ClientConn) Target() string {
- return cc.target
-}
-
-// CanonicalTarget returns the canonical target string used when creating cc.
-//
-// This always has the form "<scheme>://[authority]/<endpoint>". For example:
-//
-// - "dns:///example.com:42"
-// - "dns://8.8.8.8/example.com:42"
-// - "unix:///path/to/socket"
-func (cc *ClientConn) CanonicalTarget() string {
- return cc.parsedTarget.String()
-}
-
-func (cc *ClientConn) incrCallsStarted() {
- cc.channelz.ChannelMetrics.CallsStarted.Add(1)
- cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
-}
-
-func (cc *ClientConn) incrCallsSucceeded() {
- cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
-}
-
-func (cc *ClientConn) incrCallsFailed() {
- cc.channelz.ChannelMetrics.CallsFailed.Add(1)
-}
-
-// connect starts creating a transport.
-// It does nothing if the ac is not IDLE.
-// TODO(bar) Move this to the addrConn section.
-func (ac *addrConn) connect() error {
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- if logger.V(2) {
- logger.Infof("connect called on shutdown addrConn; ignoring.")
- }
- ac.mu.Unlock()
- return errConnClosing
- }
- if ac.state != connectivity.Idle {
- if logger.V(2) {
- logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
- }
- ac.mu.Unlock()
- return nil
- }
-
- ac.resetTransportAndUnlock()
- return nil
-}
-
-// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
-// This is different from the Equal method on the resolver.Address type which
-// considers all fields to determine equality. Here, we only consider fields
-// that are meaningful to the subConn.
-func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
- return a.Addr == b.Addr && a.ServerName == b.ServerName &&
- a.Attributes.Equal(b.Attributes) &&
- a.Metadata == b.Metadata
-}
-
-func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
- return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
-}
-
-// updateAddrs updates ac.addrs with the new addresses list and handles active
-// connections or connection attempts.
-func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- addrs = copyAddresses(addrs)
- limit := len(addrs)
- if limit > 5 {
- limit = 5
- }
- channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
-
- ac.mu.Lock()
- if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
- ac.mu.Unlock()
- return
- }
-
- ac.addrs = addrs
-
- if ac.state == connectivity.Shutdown ||
- ac.state == connectivity.TransientFailure ||
- ac.state == connectivity.Idle {
- // We were not connecting, so do nothing but update the addresses.
- ac.mu.Unlock()
- return
- }
-
- if ac.state == connectivity.Ready {
- // Try to find the connected address.
- for _, a := range addrs {
- a.ServerName = ac.cc.getServerName(a)
- if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
- // We are connected to a valid address, so do nothing but
- // update the addresses.
- ac.mu.Unlock()
- return
- }
- }
- }
-
- // We are either connected to the wrong address or currently connecting.
- // Stop the current iteration and restart.
-
- ac.cancel()
- ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
-
- // We have to defer here because GracefulClose => onClose, which requires
- // locking ac.mu.
- if ac.transport != nil {
- defer ac.transport.GracefulClose()
- ac.transport = nil
- }
-
- if len(addrs) == 0 {
- ac.updateConnectivityState(connectivity.Idle, nil)
- }
-
- // Since we were connecting/connected, we should start a new connection
- // attempt.
- go ac.resetTransportAndUnlock()
-}
-
-// getServerName determines the serverName to be used in the connection
-// handshake. The default value for the serverName is the authority on the
-// ClientConn, which either comes from the user's dial target or through an
-// authority override specified using the WithAuthority dial option. Name
-// resolvers can specify a per-address override for the serverName through the
-// resolver.Address.ServerName field which is used only if the WithAuthority
-// dial option was not used. The rationale is that per-address authority
-// overrides specified by the name resolver can represent a security risk, while
-// an override specified by the user is more dependable since they probably know
-// what they are doing.
-func (cc *ClientConn) getServerName(addr resolver.Address) string {
- if cc.dopts.authority != "" {
- return cc.dopts.authority
- }
- if addr.ServerName != "" {
- return addr.ServerName
- }
- return cc.authority
-}
-
-func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
- if sc == nil {
- return MethodConfig{}
- }
- if m, ok := sc.Methods[method]; ok {
- return m
- }
- i := strings.LastIndex(method, "/")
- if m, ok := sc.Methods[method[:i+1]]; ok {
- return m
- }
- return sc.Methods[""]
-}
-
-// GetMethodConfig gets the method config of the input method.
-// If there's an exact match for input method (i.e. /service/method), we return
-// the corresponding MethodConfig.
-// If there isn't an exact match for the input method, we look for the service's default
-// config under the service (i.e /service/) and then for the default for all services (empty string).
-//
-// If there is a default MethodConfig for the service, we return it.
-// Otherwise, we return an empty MethodConfig.
-func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
- // TODO: Avoid the locking here.
- cc.mu.RLock()
- defer cc.mu.RUnlock()
- return getMethodConfig(cc.sc, method)
-}
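-
-// A lookup sketch (illustrative; the service and method names are assumed):
-//
-//	mc := cc.GetMethodConfig("/pkg.Service/Method")
-//	// mc is, in order of preference, the config registered for
-//	// "/pkg.Service/Method" (exact match), "/pkg.Service/" (service
-//	// default), or "" (global default); otherwise a zero MethodConfig.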
-
-func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
- cc.mu.RLock()
- defer cc.mu.RUnlock()
- if cc.sc == nil {
- return nil
- }
- return cc.sc.healthCheckConfig
-}
-
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
- return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
- Ctx: ctx,
- FullMethodName: method,
- })
-}
-
-func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
- if sc == nil {
- // should never reach here.
- return
- }
- cc.sc = sc
- if configSelector != nil {
- cc.safeConfigSelector.UpdateConfigSelector(configSelector)
- }
-
- if cc.sc.retryThrottling != nil {
- newThrottler := &retryThrottler{
- tokens: cc.sc.retryThrottling.MaxTokens,
- max: cc.sc.retryThrottling.MaxTokens,
- thresh: cc.sc.retryThrottling.MaxTokens / 2,
- ratio: cc.sc.retryThrottling.TokenRatio,
- }
- cc.retryThrottler.Store(newThrottler)
- } else {
- cc.retryThrottler.Store((*retryThrottler)(nil))
- }
-}
-
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
- cc.mu.RLock()
- cc.resolverWrapper.resolveNow(o)
- cc.mu.RUnlock()
-}
-
-func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
- cc.resolverWrapper.resolveNow(o)
-}
-
-// ResetConnectBackoff wakes up all subchannels in transient failure and causes
-// them to attempt another connection immediately. It also resets the backoff
-// times used for subsequent attempts regardless of the current state.
-//
-// In general, this function should not be used. Typical service or network
-// outages result in a reasonable client reconnection strategy by default.
-// However, if a previously unavailable network becomes available, this may be
-// used to trigger an immediate reconnect.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func (cc *ClientConn) ResetConnectBackoff() {
- cc.mu.Lock()
- conns := cc.conns
- cc.mu.Unlock()
- for ac := range conns {
- ac.resetConnectBackoff()
- }
-}
-
-// Close tears down the ClientConn and all underlying connections.
-func (cc *ClientConn) Close() error {
- defer func() {
- cc.cancel()
- <-cc.csMgr.pubSub.Done()
- }()
-
- // Prevent calls to enter/exit idle immediately, and ensure we are not
- // currently entering/exiting idle mode.
- cc.idlenessMgr.Close()
-
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return ErrClientConnClosing
- }
-
- conns := cc.conns
- cc.conns = nil
- cc.csMgr.updateState(connectivity.Shutdown)
-
- // We can safely unlock and continue to access all fields now as
- // cc.conns==nil, preventing any further operations on cc.
- cc.mu.Unlock()
-
- cc.resolverWrapper.close()
-	// The order of closing matters here since the balancer wrapper assumes the
-	// picker is closed before the balancer itself is closed.
- cc.pickerWrapper.close()
- cc.balancerWrapper.close()
-
- <-cc.resolverWrapper.serializer.Done()
- <-cc.balancerWrapper.serializer.Done()
- var wg sync.WaitGroup
- for ac := range conns {
- wg.Add(1)
- go func(ac *addrConn) {
- defer wg.Done()
- ac.tearDown(ErrClientConnClosing)
- }(ac)
- }
- wg.Wait()
- cc.addTraceEvent("deleted")
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
- // trace reference to the entity being deleted, and thus prevent it from being
- // deleted right away.
- channelz.RemoveEntry(cc.channelz.ID)
-
- return nil
-}
-
-// addrConn is a network connection to a given address.
-type addrConn struct {
- ctx context.Context
- cancel context.CancelFunc
-
- cc *ClientConn
- dopts dialOptions
- acbw *acBalancerWrapper
- scopts balancer.NewSubConnOptions
-
- // transport is set when there's a viable transport (note: ac state may not be READY as LB channel
- // health checking may require server to report healthy to set ac to READY), and is reset
- // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway
- // is received, transport is closed, ac has been torn down).
- transport transport.ClientTransport // The current transport.
-
- // This mutex is used on the RPC path, so its usage should be minimized as
- // much as possible.
- // TODO: Find a lock-free way to retrieve the transport and state from the
- // addrConn.
- mu sync.Mutex
- curAddr resolver.Address // The current address.
- addrs []resolver.Address // All addresses that the resolver resolved to.
-
- // Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
-
- backoffIdx int // Needs to be stateful for resetConnectBackoff.
- resetBackoff chan struct{}
-
- channelz *channelz.SubChannel
-}
-
-// Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
- if ac.state == s {
- return
- }
- ac.state = s
- ac.channelz.ChannelMetrics.State.Store(&s)
- if lastErr == nil {
- channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
- } else {
- channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
- }
- ac.acbw.updateState(s, ac.curAddr, lastErr)
-}
-
-// adjustParams updates parameters used to create transports upon
-// receiving a GoAway.
-func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
- switch r {
- case transport.GoAwayTooManyPings:
- v := 2 * ac.dopts.copts.KeepaliveParams.Time
- ac.cc.mu.Lock()
- if v > ac.cc.keepaliveParams.Time {
- ac.cc.keepaliveParams.Time = v
- }
- ac.cc.mu.Unlock()
- }
-}
-
-// resetTransportAndUnlock unconditionally connects the addrConn.
-//
-// ac.mu must be held by the caller, and this function will guarantee it is released.
-func (ac *addrConn) resetTransportAndUnlock() {
- acCtx := ac.ctx
- if acCtx.Err() != nil {
- ac.mu.Unlock()
- return
- }
-
- addrs := ac.addrs
- backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
- // This will be the duration that dial gets to finish.
- dialDuration := minConnectTimeout
- if ac.dopts.minConnectTimeout != nil {
- dialDuration = ac.dopts.minConnectTimeout()
- }
-
- if dialDuration < backoffFor {
- // Give dial more time as we keep failing to connect.
- dialDuration = backoffFor
- }
- // We can potentially spend all the time trying the first address, and
- // if the server accepts the connection and then hangs, the following
- // addresses will never be tried.
- //
- // The spec doesn't mention what should be done for multiple addresses.
- // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
- connectDeadline := time.Now().Add(dialDuration)
-
- ac.updateConnectivityState(connectivity.Connecting, nil)
- ac.mu.Unlock()
-
- if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
- // TODO: #7534 - Move re-resolution requests into the pick_first LB policy
- // to ensure one resolution request per pass instead of per subconn failure.
- ac.cc.resolveNow(resolver.ResolveNowOptions{})
- ac.mu.Lock()
- if acCtx.Err() != nil {
- // addrConn was torn down.
- ac.mu.Unlock()
- return
- }
- // After exhausting all addresses, the addrConn enters
- // TRANSIENT_FAILURE.
- ac.updateConnectivityState(connectivity.TransientFailure, err)
-
- // Backoff.
- b := ac.resetBackoff
- ac.mu.Unlock()
-
- timer := time.NewTimer(backoffFor)
- select {
- case <-timer.C:
- ac.mu.Lock()
- ac.backoffIdx++
- ac.mu.Unlock()
- case <-b:
- timer.Stop()
- case <-acCtx.Done():
- timer.Stop()
- return
- }
-
- ac.mu.Lock()
- if acCtx.Err() == nil {
- ac.updateConnectivityState(connectivity.Idle, err)
- }
- ac.mu.Unlock()
- return
- }
- // Success; reset backoff.
- ac.mu.Lock()
- ac.backoffIdx = 0
- ac.mu.Unlock()
-}
-
-// tryAllAddrs tries to create a connection to the addresses, stopping at the
-// first successful one. It returns an error if no address was successfully
-// connected, or updates ac appropriately with the new transport.
-func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
- var firstConnErr error
- for _, addr := range addrs {
- ac.channelz.ChannelMetrics.Target.Store(&addr.Addr)
- if ctx.Err() != nil {
- return errConnClosing
- }
- ac.mu.Lock()
-
- ac.cc.mu.RLock()
- ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams
- ac.cc.mu.RUnlock()
-
- copts := ac.dopts.copts
- if ac.scopts.CredsBundle != nil {
- copts.CredsBundle = ac.scopts.CredsBundle
- }
- ac.mu.Unlock()
-
- channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr)
-
- err := ac.createTransport(ctx, addr, copts, connectDeadline)
- if err == nil {
- return nil
- }
- if firstConnErr == nil {
- firstConnErr = err
- }
- ac.cc.updateConnectionError(err)
- }
-
- // Couldn't connect to any address.
- return firstConnErr
-}
-
-// createTransport creates a connection to addr. It returns an error if the
-// connection attempt fails; otherwise it updates ac appropriately with the
-// new transport.
-func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
- addr.ServerName = ac.cc.getServerName(addr)
- hctx, hcancel := context.WithCancel(ctx)
-
- onClose := func(r transport.GoAwayReason) {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- // adjust params based on GoAwayReason
- ac.adjustParams(r)
- if ctx.Err() != nil {
- // Already shut down or connection attempt canceled. tearDown() or
- // updateAddrs() already cleared the transport and canceled hctx
- // via ac.ctx, and we expected this connection to be closed, so do
- // nothing here.
- return
- }
- hcancel()
- if ac.transport == nil {
- // We're still connecting to this address, which could error. Do
- // not update the connectivity state or resolve; these will happen
- // at the end of the tryAllAddrs connection loop in the event of an
- // error.
- return
- }
- ac.transport = nil
- // Refresh the name resolver on any connection loss.
- ac.cc.resolveNow(resolver.ResolveNowOptions{})
- // Always go idle and wait for the LB policy to initiate a new
- // connection attempt.
- ac.updateConnectivityState(connectivity.Idle, nil)
- }
-
- connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
- defer cancel()
- copts.ChannelzParent = ac.channelz
-
- newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose)
- if err != nil {
- if logger.V(2) {
- logger.Infof("Creating new client transport to %q: %v", addr, err)
- }
- // newTr is either nil, or closed.
- hcancel()
- channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
- return err
- }
-
- ac.mu.Lock()
- defer ac.mu.Unlock()
- if ctx.Err() != nil {
- // This can happen if the subConn was removed while in `Connecting`
- // state. tearDown() would have set the state to `Shutdown`, but
- // would not have closed the transport since ac.transport would not
- // have been set at that point.
- //
- // We run this in a goroutine because newTr.Close() calls onClose()
- // inline, which requires locking ac.mu.
- //
- // The error we pass to Close() is immaterial since there are no open
- // streams at this point, so no trailers with error details will be sent
- // out. We just need to pass a non-nil error.
- //
- // This can also happen when updateAddrs is called during a connection
- // attempt.
- go newTr.Close(transport.ErrConnClosing)
- return nil
- }
- if hctx.Err() != nil {
- // onClose was already called for this connection, but the connection
- // was successfully established first. Consider it a success and set
- // the new state to Idle.
- ac.updateConnectivityState(connectivity.Idle, nil)
- return nil
- }
- ac.curAddr = addr
- ac.transport = newTr
- ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
- return nil
-}
-
-// startHealthCheck starts the health checking stream (RPC) to watch the health
-// stats of this connection if health checking is requested and configured.
-//
-// LB channel health checking is enabled when all requirements below are met:
-// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
-// 2. internal.HealthCheckFunc is set by importing the grpc/health package
-// 3. a service config with non-empty healthCheckConfig field is provided
-// 4. the load balancer requests it
-//
-// It sets addrConn to READY if the health checking stream is not started.
-//
-// Caller must hold ac.mu.
-func (ac *addrConn) startHealthCheck(ctx context.Context) {
- var healthcheckManagingState bool
- defer func() {
- if !healthcheckManagingState {
- ac.updateConnectivityState(connectivity.Ready, nil)
- }
- }()
-
- if ac.cc.dopts.disableHealthCheck {
- return
- }
- healthCheckConfig := ac.cc.healthCheckConfig()
- if healthCheckConfig == nil {
- return
- }
- if !ac.scopts.HealthCheckEnabled {
- return
- }
- healthCheckFunc := internal.HealthCheckFunc
- if healthCheckFunc == nil {
-		// The health package was not imported, so the health check function
-		// is not set.
- //
- // TODO: add a link to the health check doc in the error message.
- channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.")
- return
- }
-
- healthcheckManagingState = true
-
- // Set up the health check helper functions.
- currentTr := ac.transport
- newStream := func(method string) (any, error) {
- ac.mu.Lock()
- if ac.transport != currentTr {
- ac.mu.Unlock()
- return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
- }
- ac.mu.Unlock()
- return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
- }
- setConnectivityState := func(s connectivity.State, lastErr error) {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- if ac.transport != currentTr {
- return
- }
- ac.updateConnectivityState(s, lastErr)
- }
- // Start the health checking stream.
- go func() {
- err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
- if err != nil {
- if status.Code(err) == codes.Unimplemented {
- channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
- } else {
- channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err)
- }
- }
- }()
-}
-
-func (ac *addrConn) resetConnectBackoff() {
- ac.mu.Lock()
- close(ac.resetBackoff)
- ac.backoffIdx = 0
- ac.resetBackoff = make(chan struct{})
- ac.mu.Unlock()
-}
-
-// getReadyTransport returns the transport if ac's state is READY, or nil otherwise.
-func (ac *addrConn) getReadyTransport() transport.ClientTransport {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- if ac.state == connectivity.Ready {
- return ac.transport
- }
- return nil
-}
-
-// tearDown starts to tear down the addrConn.
-//
-// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
-// will leak. In most cases, call cc.removeAddrConn() instead.
-func (ac *addrConn) tearDown(err error) {
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
- curTr := ac.transport
- ac.transport = nil
- // We have to set the state to Shutdown before anything else to prevent races
- // between setting the state and logic that waits on context cancellation / etc.
- ac.updateConnectivityState(connectivity.Shutdown, nil)
- ac.cancel()
- ac.curAddr = resolver.Address{}
-
- channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
- Desc: "Subchannel deleted",
- Severity: channelz.CtInfo,
- Parent: &channelz.TraceEvent{
- Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID),
- Severity: channelz.CtInfo,
- },
- })
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
- // trace reference to the entity being deleted, and thus prevent it from
- // being deleted right away.
- channelz.RemoveEntry(ac.channelz.ID)
- ac.mu.Unlock()
-
- // We have to release the lock before the call to GracefulClose/Close here
- // because both of them call onClose(), which requires locking ac.mu.
- if curTr != nil {
- if err == errConnDrain {
- // Close the transport gracefully when the subConn is being shutdown.
- //
- // GracefulClose() may be executed multiple times if:
- // - multiple GoAway frames are received from the server
- // - there are concurrent name resolver or balancer triggered
- // address removal and GoAway
- curTr.GracefulClose()
- } else {
- // Hard close the transport when the channel is entering idle or is
- // being shutdown. In the case where the channel is being shutdown,
- // closing of transports is also taken care of by cancellation of cc.ctx.
- // But in the case where the channel is entering idle, we need to
- // explicitly close the transports here. Instead of distinguishing
- // between these two cases, it is simpler to close the transport
- // unconditionally here.
- curTr.Close(err)
- }
- }
-}
-
-type retryThrottler struct {
- max float64
- thresh float64
- ratio float64
-
- mu sync.Mutex
- tokens float64 // TODO(dfawley): replace with atomic and remove lock.
-}
-
-// throttle subtracts a retry token from the pool and returns whether a retry
-// should be throttled (disallowed) based upon the retry throttling policy in
-// the service config.
-func (rt *retryThrottler) throttle() bool {
- if rt == nil {
- return false
- }
- rt.mu.Lock()
- defer rt.mu.Unlock()
- rt.tokens--
- if rt.tokens < 0 {
- rt.tokens = 0
- }
- return rt.tokens <= rt.thresh
-}
-
-func (rt *retryThrottler) successfulRPC() {
- if rt == nil {
- return
- }
- rt.mu.Lock()
- defer rt.mu.Unlock()
- rt.tokens += rt.ratio
- if rt.tokens > rt.max {
- rt.tokens = rt.max
- }
-}
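-
-// A worked sketch (illustrative numbers): with MaxTokens=10 and
-// TokenRatio=0.1, tokens starts at 10 and thresh is 5. Each throttle call
-// costs one token and each successful RPC refunds 0.1, so retries are
-// disallowed once tokens <= 5, i.e. after roughly five consecutive failures
-// from a full bucket, and ten successes are needed to earn one token back.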
-
-func (ac *addrConn) incrCallsStarted() {
- ac.channelz.ChannelMetrics.CallsStarted.Add(1)
- ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
-}
-
-func (ac *addrConn) incrCallsSucceeded() {
- ac.channelz.ChannelMetrics.CallsSucceeded.Add(1)
-}
-
-func (ac *addrConn) incrCallsFailed() {
- ac.channelz.ChannelMetrics.CallsFailed.Add(1)
-}
-
-// ErrClientConnTimeout indicates that the ClientConn cannot establish the
-// underlying connections within the specified timeout.
-//
-// Deprecated: This error is never returned by grpc and should not be
-// referenced by users.
-var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
-
-// getResolver finds the scheme in the cc's resolvers or the global registry.
-// scheme should always be lowercase (typically by virtue of url.Parse()
-// performing proper RFC3986 behavior).
-func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
- for _, rb := range cc.dopts.resolvers {
- if scheme == rb.Scheme() {
- return rb
- }
- }
- return resolver.Get(scheme)
-}
-
-func (cc *ClientConn) updateConnectionError(err error) {
- cc.lceMu.Lock()
- cc.lastConnectionError = err
- cc.lceMu.Unlock()
-}
-
-func (cc *ClientConn) connectionError() error {
- cc.lceMu.Lock()
- defer cc.lceMu.Unlock()
- return cc.lastConnectionError
-}
-
-// initParsedTargetAndResolverBuilder parses the user's dial target and stores
-// the parsed target in `cc.parsedTarget`.
-//
-// The resolver to use is determined based on the scheme in the parsed target
-// and the same is stored in `cc.resolverBuilder`.
-//
-// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
- logger.Infof("original dial target is: %q", cc.target)
-
- var rb resolver.Builder
- parsedTarget, err := parseTarget(cc.target)
- if err == nil {
- rb = cc.getResolver(parsedTarget.URL.Scheme)
- if rb != nil {
- cc.parsedTarget = parsedTarget
- cc.resolverBuilder = rb
- return nil
- }
- }
-
-	// We are here because the user's dial target did not contain a scheme or
-	// specified an unregistered scheme. We should fall back to the default
-	// scheme, except when a custom dialer is specified, in which case we
-	// should always use the passthrough scheme. In either case, we need to
-	// respect any overridden global defaults set by the user.
- defScheme := cc.dopts.defaultScheme
- if internal.UserSetDefaultScheme {
- defScheme = resolver.GetDefaultScheme()
- }
-
- canonicalTarget := defScheme + ":///" + cc.target
-
- parsedTarget, err = parseTarget(canonicalTarget)
- if err != nil {
- return err
- }
- rb = cc.getResolver(parsedTarget.URL.Scheme)
- if rb == nil {
- return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
- }
- cc.parsedTarget = parsedTarget
- cc.resolverBuilder = rb
- return nil
-}
-
-// parseTarget uses RFC 3986 semantics to parse the given target into a
-// resolver.Target struct containing the parsed URL. Query params are stripped
-// from the endpoint.
-func parseTarget(target string) (resolver.Target, error) {
- u, err := url.Parse(target)
- if err != nil {
- return resolver.Target{}, err
- }
-
- return resolver.Target{URL: *u}, nil
-}
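-
-// A parsing sketch (illustrative targets): url.Parse splits a target into
-// scheme, authority, and endpoint.
-//
-//	"dns://8.8.8.8/foo.googleapis.com:8080"
-//	  -> URL.Scheme "dns", URL.Host (authority) "8.8.8.8",
-//	     Endpoint() "foo.googleapis.com:8080"
-//	"unix:///path/to/socket"
-//	  -> URL.Scheme "unix", empty authority, Endpoint() "path/to/socket"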
-
-// encodeAuthority escapes the authority string based on valid chars defined in
-// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.
-func encodeAuthority(authority string) string {
- const upperhex = "0123456789ABCDEF"
-
-	// shouldEscape reports whether c must be escaped. Valid (unescaped)
-	// characters are listed in:
-	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
- shouldEscape := func(c byte) bool {
- // Alphanum are always allowed.
- if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
- return false
- }
- switch c {
- case '-', '_', '.', '~': // Unreserved characters
- return false
- case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
- return false
- case ':', '[', ']', '@': // Authority related delimiters
- return false
- }
- // Everything else must be escaped.
- return true
- }
-
- hexCount := 0
- for i := 0; i < len(authority); i++ {
- c := authority[i]
- if shouldEscape(c) {
- hexCount++
- }
- }
-
- if hexCount == 0 {
- return authority
- }
-
- required := len(authority) + 2*hexCount
- t := make([]byte, required)
-
- j := 0
- // This logic is a barebones version of escape in the go net/url library.
- for i := 0; i < len(authority); i++ {
- switch c := authority[i]; {
- case shouldEscape(c):
- t[j] = '%'
- t[j+1] = upperhex[c>>4]
- t[j+2] = upperhex[c&15]
- j += 3
- default:
- t[j] = authority[i]
- j++
- }
- }
- return string(t)
-}
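-
-// An encoding sketch (illustrative inputs): only characters outside the
-// RFC 3986 authority set are percent-encoded.
-//
-//	encodeAuthority("user@host:443") == "user@host:443" // all characters allowed
-//	encodeAuthority("a b")           == "a%20b"         // space is escaped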
-
-// Determine channel authority. The order of precedence is as follows:
-// - user specified authority override using `WithAuthority` dial option
-// - creds' notion of server name for the authentication handshake
-// - endpoint from dial target of the form "scheme://[authority]/endpoint"
-//
-// Stores the determined authority in `cc.authority`.
-//
-// Returns a non-nil error if the authority returned by the transport
-// credentials does not match the authority configured through the dial option.
-//
-// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) initAuthority() error {
- dopts := cc.dopts
- // Historically, we had two options for users to specify the serverName or
- // authority for a channel. One was through the transport credentials
- // (either in its constructor, or through the OverrideServerName() method).
- // The other option (for cases where WithInsecure() dial option was used)
- // was to use the WithAuthority() dial option.
- //
- // A few things have changed since:
- // - `insecure` package with an implementation of the `TransportCredentials`
- // interface for the insecure case
- // - WithAuthority() dial option support for secure credentials
- authorityFromCreds := ""
- if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
- authorityFromCreds = creds.Info().ServerName
- }
- authorityFromDialOption := dopts.authority
- if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
- return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
- }
-
- endpoint := cc.parsedTarget.Endpoint()
- if authorityFromDialOption != "" {
- cc.authority = authorityFromDialOption
- } else if authorityFromCreds != "" {
- cc.authority = authorityFromCreds
- } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
- cc.authority = auth.OverrideAuthority(cc.parsedTarget)
- } else if strings.HasPrefix(endpoint, ":") {
- cc.authority = "localhost" + endpoint
- } else {
- cc.authority = encodeAuthority(endpoint)
- }
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
deleted file mode 100644
index 959c2f99d..000000000
--- a/vendor/google.golang.org/grpc/codec.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "google.golang.org/grpc/encoding"
- _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
- "google.golang.org/grpc/mem"
-)
-
-// baseCodec captures the new encoding.CodecV2 interface without the Name
-// function, allowing it to be implemented by older Codec and encoding.Codec
-// implementations. The omitted Name function is only needed for the register in
-// the encoding package and is not part of the core functionality.
-type baseCodec interface {
- Marshal(v any) (mem.BufferSlice, error)
- Unmarshal(data mem.BufferSlice, v any) error
-}
-
-// getCodec returns an encoding.CodecV2 for the codec of the given name (if
-// registered). It first checks the V1 registry with encoding.GetCodec and, if
-// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
-// an encoding.CodecV2. Otherwise, it returns the result of looking up the V2
-// registry with encoding.GetCodecV2, which may be nil.
-func getCodec(name string) encoding.CodecV2 {
- if codecV1 := encoding.GetCodec(name); codecV1 != nil {
- return newCodecV1Bridge(codecV1)
- }
-
- return encoding.GetCodecV2(name)
-}
-
-func newCodecV0Bridge(c Codec) baseCodec {
- return codecV0Bridge{codec: c}
-}
-
-func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
- return codecV1Bridge{
- codecV0Bridge: codecV0Bridge{codec: c},
- name: c.Name(),
- }
-}
-
-var _ baseCodec = codecV0Bridge{}
-
-type codecV0Bridge struct {
- codec interface {
- Marshal(v any) ([]byte, error)
- Unmarshal(data []byte, v any) error
- }
-}
-
-func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
- data, err := c.codec.Marshal(v)
- if err != nil {
- return nil, err
- }
- return mem.BufferSlice{mem.SliceBuffer(data)}, nil
-}
-
-func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
- return c.codec.Unmarshal(data.Materialize(), v)
-}
-
-var _ encoding.CodecV2 = codecV1Bridge{}
-
-type codecV1Bridge struct {
- codecV0Bridge
- name string
-}
-
-func (c codecV1Bridge) Name() string {
- return c.name
-}
-
-// Codec defines the interface gRPC uses to encode and decode messages.
-// Note that implementations of this interface must be thread safe;
-// a Codec's methods can be called from concurrent goroutines.
-//
-// Deprecated: use encoding.Codec instead.
-type Codec interface {
- // Marshal returns the wire format of v.
- Marshal(v any) ([]byte, error)
- // Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v any) error
- // String returns the name of the Codec implementation. This is unused by
- // gRPC.
- String() string
-}
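
getCodec above prefers the V1 registry and bridges []byte-based codecs into the BufferSlice-based encoding.CodecV2. A minimal sketch of a codec that would take that bridge path: a JSON codec registered through encoding.RegisterCodec. The codec itself is illustrative and not part of grpc.

package main

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// jsonCodec implements the V1 encoding.Codec interface: Marshal/Unmarshal
// operate on []byte, and Name is used only for registry lookups.
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

func init() {
	// Once registered, getCodec("json") above finds this codec in the V1
	// registry and wraps it with newCodecV1Bridge into an encoding.CodecV2.
	encoding.RegisterCodec(jsonCodec{})
}

func main() {}
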
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
deleted file mode 100644
index 934fac2b0..000000000
--- a/vendor/google.golang.org/grpc/codes/code_string.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package codes
-
-import (
- "strconv"
-
- "google.golang.org/grpc/internal"
-)
-
-func init() {
- internal.CanonicalString = canonicalString
-}
-
-func (c Code) String() string {
- switch c {
- case OK:
- return "OK"
- case Canceled:
- return "Canceled"
- case Unknown:
- return "Unknown"
- case InvalidArgument:
- return "InvalidArgument"
- case DeadlineExceeded:
- return "DeadlineExceeded"
- case NotFound:
- return "NotFound"
- case AlreadyExists:
- return "AlreadyExists"
- case PermissionDenied:
- return "PermissionDenied"
- case ResourceExhausted:
- return "ResourceExhausted"
- case FailedPrecondition:
- return "FailedPrecondition"
- case Aborted:
- return "Aborted"
- case OutOfRange:
- return "OutOfRange"
- case Unimplemented:
- return "Unimplemented"
- case Internal:
- return "Internal"
- case Unavailable:
- return "Unavailable"
- case DataLoss:
- return "DataLoss"
- case Unauthenticated:
- return "Unauthenticated"
- default:
- return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
- }
-}
-
-func canonicalString(c Code) string {
- switch c {
- case OK:
- return "OK"
- case Canceled:
- return "CANCELLED"
- case Unknown:
- return "UNKNOWN"
- case InvalidArgument:
- return "INVALID_ARGUMENT"
- case DeadlineExceeded:
- return "DEADLINE_EXCEEDED"
- case NotFound:
- return "NOT_FOUND"
- case AlreadyExists:
- return "ALREADY_EXISTS"
- case PermissionDenied:
- return "PERMISSION_DENIED"
- case ResourceExhausted:
- return "RESOURCE_EXHAUSTED"
- case FailedPrecondition:
- return "FAILED_PRECONDITION"
- case Aborted:
- return "ABORTED"
- case OutOfRange:
- return "OUT_OF_RANGE"
- case Unimplemented:
- return "UNIMPLEMENTED"
- case Internal:
- return "INTERNAL"
- case Unavailable:
- return "UNAVAILABLE"
- case DataLoss:
- return "DATA_LOSS"
- case Unauthenticated:
- return "UNAUTHENTICATED"
- default:
- return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
- }
-}
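
Code.String above yields Go-style CamelCase names, while canonicalString yields the ALL_CAPS names from the gRPC spec (exposed to the rest of the module through the internal package). A small sketch of the user-facing side: branch on the code carried by an RPC error and print it with %v, which goes through String. The error here is constructed with status.Error purely for illustration.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify branches on the code carried by an RPC error; printing a
// codes.Code with %v goes through the String method above.
func classify(err error) {
	switch c := status.Code(err); c {
	case codes.OK:
		fmt.Println("success")
	case codes.Unavailable, codes.DeadlineExceeded:
		fmt.Printf("retryable failure: %v\n", c) // e.g. "Unavailable"
	default:
		fmt.Printf("permanent failure: %v\n", c)
	}
}

func main() {
	classify(status.Error(codes.Unavailable, "server down"))
}
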
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
deleted file mode 100644
index 0b42c302b..000000000
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package codes defines the canonical error codes used by gRPC. It is
-// consistent across various languages.
-package codes // import "google.golang.org/grpc/codes"
-
-import (
- "fmt"
- "strconv"
-)
-
-// A Code is a status code defined according to the [gRPC documentation].
-//
-// Only the codes defined as consts in this package are valid codes. Do not use
-// other code values. Behavior of other codes is implementation-specific and
-// interoperability between implementations is not guaranteed.
-//
-// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
-type Code uint32
-
-const (
- // OK is returned on success.
- OK Code = 0
-
- // Canceled indicates the operation was canceled (typically by the caller).
- //
- // The gRPC framework will generate this error code when cancellation
- // is requested.
- Canceled Code = 1
-
- // Unknown error. An example of where this error may be returned is
- // if a Status value received from another address space belongs to
- // an error-space that is not known in this address space. Also
- // errors raised by APIs that do not return enough error information
- // may be converted to this error.
- //
- // The gRPC framework will generate this error code in the above two
- // mentioned cases.
- Unknown Code = 2
-
- // InvalidArgument indicates client specified an invalid argument.
- // Note that this differs from FailedPrecondition. It indicates arguments
- // that are problematic regardless of the state of the system
- // (e.g., a malformed file name).
- //
- // This error code will not be generated by the gRPC framework.
- InvalidArgument Code = 3
-
- // DeadlineExceeded means operation expired before completion.
- // For operations that change the state of the system, this error may be
- // returned even if the operation has completed successfully. For
- // example, a successful response from a server could have been delayed
- // long enough for the deadline to expire.
- //
- // The gRPC framework will generate this error code when the deadline is
- // exceeded.
- DeadlineExceeded Code = 4
-
- // NotFound means some requested entity (e.g., file or directory) was
- // not found.
- //
- // This error code will not be generated by the gRPC framework.
- NotFound Code = 5
-
- // AlreadyExists means an attempt to create an entity failed because one
- // already exists.
- //
- // This error code will not be generated by the gRPC framework.
- AlreadyExists Code = 6
-
- // PermissionDenied indicates the caller does not have permission to
- // execute the specified operation. It must not be used for rejections
- // caused by exhausting some resource (use ResourceExhausted
- // instead for those errors). It must not be
- // used if the caller cannot be identified (use Unauthenticated
- // instead for those errors).
- //
- // This error code will not be generated by the gRPC core framework,
- // but expect authentication middleware to use it.
- PermissionDenied Code = 7
-
- // ResourceExhausted indicates some resource has been exhausted, perhaps
- // a per-user quota, or perhaps the entire file system is out of space.
- //
- // This error code will be generated by the gRPC framework in
- // out-of-memory and server overload situations, or when a message is
- // larger than the configured maximum size.
- ResourceExhausted Code = 8
-
- // FailedPrecondition indicates operation was rejected because the
- // system is not in a state required for the operation's execution.
- // For example, directory to be deleted may be non-empty, an rmdir
- // operation is applied to a non-directory, etc.
- //
- // A litmus test that may help a service implementor in deciding
- // between FailedPrecondition, Aborted, and Unavailable:
- // (a) Use Unavailable if the client can retry just the failing call.
- // (b) Use Aborted if the client should retry at a higher-level
- // (e.g., restarting a read-modify-write sequence).
- // (c) Use FailedPrecondition if the client should not retry until
- // the system state has been explicitly fixed. E.g., if an "rmdir"
- // fails because the directory is non-empty, FailedPrecondition
- // should be returned since the client should not retry unless
- // they have first fixed up the directory by deleting files from it.
- // (d) Use FailedPrecondition if the client performs conditional
- // REST Get/Update/Delete on a resource and the resource on the
- // server does not match the condition. E.g., conflicting
- // read-modify-write on the same resource.
- //
- // This error code will not be generated by the gRPC framework.
- FailedPrecondition Code = 9
-
- // Aborted indicates the operation was aborted, typically due to a
- // concurrency issue like sequencer check failures, transaction aborts,
- // etc.
- //
- // See litmus test above for deciding between FailedPrecondition,
- // Aborted, and Unavailable.
- //
- // This error code will not be generated by the gRPC framework.
- Aborted Code = 10
-
- // OutOfRange means operation was attempted past the valid range.
- // E.g., seeking or reading past end of file.
- //
- // Unlike InvalidArgument, this error indicates a problem that may
- // be fixed if the system state changes. For example, a 32-bit file
- // system will generate InvalidArgument if asked to read at an
- // offset that is not in the range [0,2^32-1], but it will generate
- // OutOfRange if asked to read from an offset past the current
- // file size.
- //
- // There is a fair bit of overlap between FailedPrecondition and
- // OutOfRange. We recommend using OutOfRange (the more specific
- // error) when it applies so that callers who are iterating through
- // a space can easily look for an OutOfRange error to detect when
- // they are done.
- //
- // This error code will not be generated by the gRPC framework.
- OutOfRange Code = 11
-
- // Unimplemented indicates operation is not implemented or not
- // supported/enabled in this service.
- //
- // This error code will be generated by the gRPC framework. Most
- // commonly, you will see this error code when a method implementation
- // is missing on the server. It can also be generated for unknown
- // compression algorithms or a disagreement as to whether an RPC should
- // be streaming.
- Unimplemented Code = 12
-
-	// Internal indicates an internal error: some invariant expected by the
-	// underlying system has been broken. If you see one of these errors,
-	// something is very broken.
- //
- // This error code will be generated by the gRPC framework in several
- // internal error conditions.
- Internal Code = 13
-
- // Unavailable indicates the service is currently unavailable.
-	// This is most likely a transient condition and may be corrected
- // by retrying with a backoff. Note that it is not always safe to retry
- // non-idempotent operations.
- //
- // See litmus test above for deciding between FailedPrecondition,
- // Aborted, and Unavailable.
- //
- // This error code will be generated by the gRPC framework during
- // abrupt shutdown of a server process or network connection.
- Unavailable Code = 14
-
- // DataLoss indicates unrecoverable data loss or corruption.
- //
- // This error code will not be generated by the gRPC framework.
- DataLoss Code = 15
-
- // Unauthenticated indicates the request does not have valid
- // authentication credentials for the operation.
- //
- // The gRPC framework will generate this error code when the
- // authentication metadata is invalid or a Credentials callback fails,
- // but also expect authentication middleware to generate it.
- Unauthenticated Code = 16
-
- _maxCode = 17
-)
-
-var strToCode = map[string]Code{
- `"OK"`: OK,
- `"CANCELLED"`:/* [sic] */ Canceled,
- `"UNKNOWN"`: Unknown,
- `"INVALID_ARGUMENT"`: InvalidArgument,
- `"DEADLINE_EXCEEDED"`: DeadlineExceeded,
- `"NOT_FOUND"`: NotFound,
- `"ALREADY_EXISTS"`: AlreadyExists,
- `"PERMISSION_DENIED"`: PermissionDenied,
- `"RESOURCE_EXHAUSTED"`: ResourceExhausted,
- `"FAILED_PRECONDITION"`: FailedPrecondition,
- `"ABORTED"`: Aborted,
- `"OUT_OF_RANGE"`: OutOfRange,
- `"UNIMPLEMENTED"`: Unimplemented,
- `"INTERNAL"`: Internal,
- `"UNAVAILABLE"`: Unavailable,
- `"DATA_LOSS"`: DataLoss,
- `"UNAUTHENTICATED"`: Unauthenticated,
-}
-
-// UnmarshalJSON unmarshals b into the Code.
-func (c *Code) UnmarshalJSON(b []byte) error {
- // From json.Unmarshaler: By convention, to approximate the behavior of
- // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
- // a no-op.
- if string(b) == "null" {
- return nil
- }
- if c == nil {
- return fmt.Errorf("nil receiver passed to UnmarshalJSON")
- }
-
- if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
- if ci >= _maxCode {
- return fmt.Errorf("invalid code: %d", ci)
- }
-
- *c = Code(ci)
- return nil
- }
-
- if jc, ok := strToCode[string(b)]; ok {
- *c = jc
- return nil
- }
- return fmt.Errorf("invalid code: %q", string(b))
-}
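
UnmarshalJSON above accepts both the numeric and the canonical string spelling of a code, which is what lets JSON documents such as service configs use either form. A quick sketch:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var byName, byNumber codes.Code

	// The canonical string form resolves through strToCode above.
	if err := json.Unmarshal([]byte(`"UNAVAILABLE"`), &byName); err != nil {
		panic(err)
	}
	// The numeric form is accepted as long as it is below _maxCode.
	if err := json.Unmarshal([]byte(`14`), &byNumber); err != nil {
		panic(err)
	}
	fmt.Println(byName, byNumber) // Unavailable Unavailable
}
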
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
deleted file mode 100644
index 4a8992642..000000000
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package connectivity defines connectivity semantics.
-// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
-package connectivity
-
-import (
- "google.golang.org/grpc/grpclog"
-)
-
-var logger = grpclog.Component("core")
-
-// State indicates the state of connectivity.
-// It can be the state of a ClientConn or SubConn.
-type State int
-
-func (s State) String() string {
- switch s {
- case Idle:
- return "IDLE"
- case Connecting:
- return "CONNECTING"
- case Ready:
- return "READY"
- case TransientFailure:
- return "TRANSIENT_FAILURE"
- case Shutdown:
- return "SHUTDOWN"
- default:
- logger.Errorf("unknown connectivity state: %d", s)
- return "INVALID_STATE"
- }
-}
-
-const (
- // Idle indicates the ClientConn is idle.
- Idle State = iota
- // Connecting indicates the ClientConn is connecting.
- Connecting
- // Ready indicates the ClientConn is ready for work.
- Ready
- // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
- TransientFailure
- // Shutdown indicates the ClientConn has started shutting down.
- Shutdown
-)
-
-// ServingMode indicates the current mode of operation of the server.
-//
-// Only xDS enabled gRPC servers currently report their serving mode.
-type ServingMode int
-
-const (
- // ServingModeStarting indicates that the server is starting up.
- ServingModeStarting ServingMode = iota
- // ServingModeServing indicates that the server contains all required
- // configuration and is serving RPCs.
- ServingModeServing
- // ServingModeNotServing indicates that the server is not accepting new
- // connections. Existing connections will be closed gracefully, allowing
- // in-progress RPCs to complete. A server enters this mode when it does not
- // contain the required configuration to serve RPCs.
- ServingModeNotServing
-)
-
-func (s ServingMode) String() string {
- switch s {
- case ServingModeStarting:
- return "STARTING"
- case ServingModeServing:
- return "SERVING"
- case ServingModeNotServing:
- return "NOT_SERVING"
- default:
- logger.Errorf("unknown serving mode: %d", s)
- return "INVALID_MODE"
- }
-}
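
Client code typically observes these States through ClientConn.GetState and WaitForStateChange, both of which are experimental APIs. A hedged sketch that drives a channel out of Idle and polls until Ready; the target address is a placeholder.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// localhost:50051 is a placeholder target.
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	cc.Connect() // leave Idle; NewClient alone does not dial.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for s := cc.GetState(); s != connectivity.Ready; s = cc.GetState() {
		log.Printf("state: %v", s) // uses State.String above
		if !cc.WaitForStateChange(ctx, s) {
			log.Fatalf("timed out in state %v", s)
		}
	}
	log.Println("READY")
}
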
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
deleted file mode 100644
index 665e790bb..000000000
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package credentials implements various credentials supported by gRPC library,
-// which encapsulate all the state needed by a client to authenticate with a
-// server and make various assertions, e.g., about the client's identity, role,
-// or whether it is authorized to make a particular call.
-package credentials // import "google.golang.org/grpc/credentials"
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
-
- "google.golang.org/grpc/attributes"
- icredentials "google.golang.org/grpc/internal/credentials"
- "google.golang.org/protobuf/proto"
-)
-
-// PerRPCCredentials defines the common interface for the credentials which need to
-// attach security information to every RPC (e.g., oauth2).
-type PerRPCCredentials interface {
- // GetRequestMetadata gets the current request metadata, refreshing tokens
- // if required. This should be called by the transport layer on each
- // request, and the data should be populated in headers or other
- // context. If a status code is returned, it will be used as the status for
- // the RPC (restricted to an allowable set of codes as defined by gRFC
- // A54). uri is the URI of the entry point for the request. When supported
- // by the underlying implementation, ctx can be used for timeout and
- // cancellation. Additionally, RequestInfo data will be available via ctx
- // to this call. TODO(zhaoq): Define the set of the qualified keys instead
- // of leaving it as an arbitrary string.
- GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
- // RequireTransportSecurity indicates whether the credentials requires
- // transport security.
- RequireTransportSecurity() bool
-}
-
-// SecurityLevel defines the protection level on an established connection.
-//
-// This API is experimental.
-type SecurityLevel int
-
-const (
- // InvalidSecurityLevel indicates an invalid security level.
- // The zero SecurityLevel value is invalid for backward compatibility.
- InvalidSecurityLevel SecurityLevel = iota
- // NoSecurity indicates a connection is insecure.
- NoSecurity
- // IntegrityOnly indicates a connection only provides integrity protection.
- IntegrityOnly
- // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
- PrivacyAndIntegrity
-)
-
-// String returns SecurityLevel in a string format.
-func (s SecurityLevel) String() string {
- switch s {
- case NoSecurity:
- return "NoSecurity"
- case IntegrityOnly:
- return "IntegrityOnly"
- case PrivacyAndIntegrity:
- return "PrivacyAndIntegrity"
- }
- return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
-}
-
-// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
-// It should be embedded in a struct implementing AuthInfo to provide additional information
-// about the credentials.
-//
-// This API is experimental.
-type CommonAuthInfo struct {
- SecurityLevel SecurityLevel
-}
-
-// GetCommonAuthInfo returns the CommonAuthInfo struct.
-func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
- return c
-}
-
-// ProtocolInfo provides information regarding the gRPC wire protocol version,
-// security protocol, security protocol version in use, server name, etc.
-type ProtocolInfo struct {
- // ProtocolVersion is the gRPC wire protocol version.
- ProtocolVersion string
- // SecurityProtocol is the security protocol in use.
- SecurityProtocol string
- // SecurityVersion is the security protocol version. It is a static version string from the
- // credentials, not a value that reflects per-connection protocol negotiation. To retrieve
- // details about the credentials used for a connection, use the Peer's AuthInfo field instead.
- //
- // Deprecated: please use Peer.AuthInfo.
- SecurityVersion string
- // ServerName is the user-configured server name.
- ServerName string
-}
-
-// AuthInfo defines the common interface for the auth information the users are interested in.
-// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
-// information about the credentials in it.
-type AuthInfo interface {
- AuthType() string
-}
-
-// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
-// and the caller should not close rawConn.
-var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
-
-// TransportCredentials defines the common interface for all the live gRPC wire
-// protocols and supported transport security protocols (e.g., TLS, SSL).
-type TransportCredentials interface {
- // ClientHandshake does the authentication handshake specified by the
- // corresponding authentication protocol on rawConn for clients. It returns
- // the authenticated connection and the corresponding auth information
- // about the connection. The auth information should embed CommonAuthInfo
- // to return additional information about the credentials. Implementations
- // must use the provided context to implement timely cancellation. gRPC
- // will try to reconnect if the error returned is a temporary error
- // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
- // returned error is a wrapper error, implementations should make sure that
- // the error implements Temporary() to have the correct retry behaviors.
- // Additionally, ClientHandshakeInfo data will be available via the context
- // passed to this call.
- //
- // The second argument to this method is the `:authority` header value used
- // while creating new streams on this connection after authentication
- // succeeds. Implementations must use this as the server name during the
- // authentication handshake.
- //
- // If the returned net.Conn is closed, it MUST close the net.Conn provided.
- ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
- // ServerHandshake does the authentication handshake for servers. It returns
- // the authenticated connection and the corresponding auth information about
- // the connection. The auth information should embed CommonAuthInfo to return additional information
- // about the credentials.
- //
- // If the returned net.Conn is closed, it MUST close the net.Conn provided.
- ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
- // Info provides the ProtocolInfo of this TransportCredentials.
- Info() ProtocolInfo
- // Clone makes a copy of this TransportCredentials.
- Clone() TransportCredentials
- // OverrideServerName specifies the value used for the following:
- // - verifying the hostname on the returned certificates
- // - as SNI in the client's handshake to support virtual hosting
- // - as the value for `:authority` header at stream creation time
- //
- // Deprecated: use grpc.WithAuthority instead. Will be supported
- // throughout 1.x.
- OverrideServerName(string) error
-}
-
-// Bundle is a combination of TransportCredentials and PerRPCCredentials.
-//
-// It also contains a mode switching method, so it can be used as a combination
-// of different credential policies.
-//
-// Bundle cannot be used together with individual TransportCredentials.
-// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
-//
-// This API is experimental.
-type Bundle interface {
- // TransportCredentials returns the transport credentials from the Bundle.
- //
- // Implementations must return non-nil transport credentials. If transport
- // security is not needed by the Bundle, implementations may choose to
- // return insecure.NewCredentials().
- TransportCredentials() TransportCredentials
-
- // PerRPCCredentials returns the per-RPC credentials from the Bundle.
- //
- // May be nil if per-RPC credentials are not needed.
- PerRPCCredentials() PerRPCCredentials
-
- // NewWithMode should make a copy of Bundle, and switch mode. Modifying the
- // existing Bundle may cause races.
- //
- // NewWithMode returns nil if the requested mode is not supported.
- NewWithMode(mode string) (Bundle, error)
-}
-
-// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
-//
-// This API is experimental.
-type RequestInfo struct {
- // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
- Method string
- // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
- AuthInfo AuthInfo
-}
-
-// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
-//
-// This API is experimental.
-func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
- ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
- return ri, ok
-}
-
-// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
-// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
-// balancer etc. Individual credential implementations control the actual
-// format of the data that they are willing to receive.
-//
-// This API is experimental.
-type ClientHandshakeInfo struct {
- // Attributes contains the attributes for the address. It could be provided
- // by the gRPC, resolver, balancer etc.
- Attributes *attributes.Attributes
-}
-
-// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
-// in ctx.
-//
-// This API is experimental.
-func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
- chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
- return chi
-}
-
-// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
-// It returns success if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the
-// GetCommonAuthInfo() method, or 3) CommonAuthInfo.SecurityLevel has the invalid zero value. Cases 2) and 3)
-// exist for backward compatibility.
-//
-// This API is experimental.
-func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
- type internalInfo interface {
- GetCommonAuthInfo() CommonAuthInfo
- }
- if ai == nil {
- return errors.New("AuthInfo is nil")
- }
- if ci, ok := ai.(internalInfo); ok {
- // CommonAuthInfo.SecurityLevel has an invalid value.
- if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
- return nil
- }
- if ci.GetCommonAuthInfo().SecurityLevel < level {
- return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
- }
- }
- // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
- return nil
-}
-
-// ChannelzSecurityInfo defines the interface that security protocols should implement
-// in order to provide security info to channelz.
-//
-// This API is experimental.
-type ChannelzSecurityInfo interface {
- GetSecurityValue() ChannelzSecurityValue
-}
-
-// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
-// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
-// and *OtherChannelzSecurityValue.
-//
-// This API is experimental.
-type ChannelzSecurityValue interface {
- isChannelzSecurityValue()
-}
-
-// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
-// from GetSecurityValue(), which contains protocol-specific security info. Note
-// that the Value field will be sent to users of channelz requesting channel
-// info, so sensitive info should be avoided.
-//
-// This API is experimental.
-type OtherChannelzSecurityValue struct {
- ChannelzSecurityValue
- Name string
- Value proto.Message
-}
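
A minimal sketch of a PerRPCCredentials implementation as defined above: a static bearer token attached to every RPC. The header name and token value are illustrative.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// staticToken attaches a fixed bearer token to every RPC. The header name
// and token value are illustrative only.
type staticToken struct{ token string }

func (t staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	// RequestInfoFromContext could be consulted here, e.g. to vary the
	// metadata per method or to inspect the handshake's AuthInfo.
	return map[string]string{"authorization": "Bearer " + t.token}, nil
}

func (t staticToken) RequireTransportSecurity() bool {
	// Returning true makes gRPC refuse to send this token over an insecure
	// connection.
	return true
}

var _ credentials.PerRPCCredentials = staticToken{}

func main() {
	// Attached as a DialOption; see WithPerRPCCredentials in dialoptions.go.
	_ = grpc.WithPerRPCCredentials(staticToken{token: "example-token"})
}
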
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
deleted file mode 100644
index 4c805c644..000000000
--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package insecure provides an implementation of the
-// credentials.TransportCredentials interface which disables transport security.
-package insecure
-
-import (
- "context"
- "net"
-
- "google.golang.org/grpc/credentials"
-)
-
-// NewCredentials returns a credentials which disables transport security.
-//
-// Note that using this credentials with per-RPC credentials which require
-// transport security is incompatible and will cause grpc.Dial() to fail.
-func NewCredentials() credentials.TransportCredentials {
- return insecureTC{}
-}
-
-// insecureTC implements the insecure transport credentials. The handshake
-// methods simply return the passed in net.Conn and set the security level to
-// NoSecurity.
-type insecureTC struct{}
-
-func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
-}
-
-func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
-}
-
-func (insecureTC) Info() credentials.ProtocolInfo {
- return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
-}
-
-func (insecureTC) Clone() credentials.TransportCredentials {
- return insecureTC{}
-}
-
-func (insecureTC) OverrideServerName(string) error {
- return nil
-}
-
-// info contains the auth information for an insecure connection.
-// It implements the AuthInfo interface.
-type info struct {
- credentials.CommonAuthInfo
-}
-
-// AuthType returns the type of info as a string.
-func (info) AuthType() string {
- return "insecure"
-}
-
-// insecureBundle implements an insecure bundle.
-// An insecure bundle provides a thin wrapper around insecureTC to support
-// the credentials.Bundle interface.
-type insecureBundle struct{}
-
-// NewBundle returns a bundle with disabled transport security and no per-RPC
-// credentials.
-func NewBundle() credentials.Bundle {
- return insecureBundle{}
-}
-
-// NewWithMode returns a new insecure Bundle. The mode is ignored.
-func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
- return insecureBundle{}, nil
-}
-
-// PerRPCCredentials returns a nil implementation, as the insecure
-// bundle does not support per-RPC credentials.
-func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
- return nil
-}
-
-// TransportCredentials returns the underlying insecure transport credential.
-func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
- return NewCredentials()
-}
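
Typical use of this package is a plaintext connection for local development. A short sketch; the target is a placeholder.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Plaintext connection for local development; localhost:50051 is a
	// placeholder target. Per-RPC credentials that require transport
	// security cannot be combined with these credentials.
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
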
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
deleted file mode 100644
index bd5fe22b6..000000000
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "net"
- "net/url"
- "os"
-
- "google.golang.org/grpc/grpclog"
- credinternal "google.golang.org/grpc/internal/credentials"
- "google.golang.org/grpc/internal/envconfig"
-)
-
-const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
-
-var logger = grpclog.Component("credentials")
-
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
- State tls.ConnectionState
- CommonAuthInfo
- // This API is experimental.
- SPIFFEID *url.URL
-}
-
-// AuthType returns the type of TLSInfo as a string.
-func (t TLSInfo) AuthType() string {
- return "tls"
-}
-
-// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
-func cipherSuiteLookup(cipherSuiteID uint16) string {
- for _, s := range tls.CipherSuites() {
- if s.ID == cipherSuiteID {
- return s.Name
- }
- }
- for _, s := range tls.InsecureCipherSuites() {
- if s.ID == cipherSuiteID {
- return s.Name
- }
- }
- return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
-}
-
-// GetSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
- v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup(t.State.CipherSuite),
- }
- // Currently there's no way to get LocalCertificate info from tls package.
- if len(t.State.PeerCertificates) > 0 {
- v.RemoteCertificate = t.State.PeerCertificates[0].Raw
- }
- return v
-}
-
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
- // TLS configuration
- config *tls.Config
-}
-
-func (c tlsCreds) Info() ProtocolInfo {
- return ProtocolInfo{
- SecurityProtocol: "tls",
- SecurityVersion: "1.2",
- ServerName: c.config.ServerName,
- }
-}
-
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
- // use local cfg to avoid clobbering ServerName if using multiple endpoints
- cfg := credinternal.CloneTLSConfig(c.config)
- if cfg.ServerName == "" {
- serverName, _, err := net.SplitHostPort(authority)
- if err != nil {
- // If the authority had no host port or if the authority cannot be parsed, use it as-is.
- serverName = authority
- }
- cfg.ServerName = serverName
- }
- conn := tls.Client(rawConn, cfg)
- errChannel := make(chan error, 1)
- go func() {
- errChannel <- conn.Handshake()
- close(errChannel)
- }()
- select {
- case err := <-errChannel:
- if err != nil {
- conn.Close()
- return nil, nil, err
- }
- case <-ctx.Done():
- conn.Close()
- return nil, nil, ctx.Err()
- }
-
- // The negotiated protocol can be either of the following:
- // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
- // it is the only protocol advertised by the client during the handshake.
- // The tls library ensures that the server chooses a protocol advertised
- // by the client.
- // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
- // for using HTTP/2 over TLS. We can terminate the connection immediately.
- np := conn.ConnectionState().NegotiatedProtocol
- if np == "" {
- if envconfig.EnforceALPNEnabled {
- conn.Close()
- return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
- }
- logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
- }
- tlsInfo := TLSInfo{
- State: conn.ConnectionState(),
- CommonAuthInfo: CommonAuthInfo{
- SecurityLevel: PrivacyAndIntegrity,
- },
- }
- id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
- if id != nil {
- tlsInfo.SPIFFEID = id
- }
- return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
- conn := tls.Server(rawConn, c.config)
- if err := conn.Handshake(); err != nil {
- conn.Close()
- return nil, nil, err
- }
- cs := conn.ConnectionState()
- // The negotiated application protocol can be empty only if the client doesn't
- // support ALPN. In such cases, we can close the connection since ALPN is required
- // for using HTTP/2 over TLS.
- if cs.NegotiatedProtocol == "" {
- if envconfig.EnforceALPNEnabled {
- conn.Close()
- return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
- } else if logger.V(2) {
- logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
- }
- }
- tlsInfo := TLSInfo{
- State: cs,
- CommonAuthInfo: CommonAuthInfo{
- SecurityLevel: PrivacyAndIntegrity,
- },
- }
- id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
- if id != nil {
- tlsInfo.SPIFFEID = id
- }
- return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
-}
-
-func (c *tlsCreds) Clone() TransportCredentials {
- return NewTLS(c.config)
-}
-
-func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
- c.config.ServerName = serverNameOverride
- return nil
-}
-
-// The following cipher suites are forbidden for use with HTTP/2 by
-// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
-var tls12ForbiddenCipherSuites = map[uint16]struct{}{
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: {},
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: {},
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {},
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {},
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {},
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {},
-}
-
-// NewTLS uses c to construct a TransportCredentials based on TLS.
-func NewTLS(c *tls.Config) TransportCredentials {
- config := applyDefaults(c)
- if config.GetConfigForClient != nil {
- oldFn := config.GetConfigForClient
- config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
- cfgForClient, err := oldFn(hello)
- if err != nil || cfgForClient == nil {
- return cfgForClient, err
- }
- return applyDefaults(cfgForClient), nil
- }
- }
- return &tlsCreds{config: config}
-}
-
-func applyDefaults(c *tls.Config) *tls.Config {
- config := credinternal.CloneTLSConfig(c)
- config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
- // If the user did not configure a MinVersion and did not configure a
- // MaxVersion < 1.2, use MinVersion=1.2, which is required by
- // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
- if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
- config.MinVersion = tls.VersionTLS12
- }
- // If the user did not configure CipherSuites, use all "secure" cipher
- // suites reported by the TLS package, but remove some explicitly forbidden
- // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
- if config.CipherSuites == nil {
- for _, cs := range tls.CipherSuites() {
- if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
- config.CipherSuites = append(config.CipherSuites, cs.ID)
- }
- }
- }
- return config
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the provided root
-// certificate authority certificate(s) to validate server connections. If
-// certificates to establish the identity of the client need to be included in
-// the credentials (eg: for mTLS), use NewTLS instead, where a complete
-// tls.Config can be specified.
-// serverNameOverride is for testing only. If set to a non-empty string,
-// it will override the virtual host name of authority (e.g. :authority header
-// field) in requests.
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the provided root
-// certificate authority certificate file(s) to validate server connections. If
-// certificates to establish the identity of the client need to be included in
-// the credentials (eg: for mTLS), use NewTLS instead, where a complete
-// tls.Config can be specified.
-// serverNameOverride is for testing only. If set to a non-empty string,
-// it will override the virtual host name of authority (e.g. :authority header
-// field) in requests.
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
- b, err := os.ReadFile(certFile)
- if err != nil {
- return nil, err
- }
- cp := x509.NewCertPool()
- if !cp.AppendCertsFromPEM(b) {
- return nil, fmt.Errorf("credentials: failed to append certificates")
- }
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
-}
-
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for server.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
- cert, err := tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return nil, err
- }
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
-}
-
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type TLSChannelzSecurityValue struct {
- ChannelzSecurityValue
- StandardName string
- LocalCertificate []byte
- RemoteCertificate []byte
-}
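
A sketch wiring NewTLS into a client with mutual TLS: a client certificate plus a CA pool for verifying the server. File paths and the target are placeholders; NewTLS applies the HTTP/2 defaults described above (TLS 1.2 minimum, "h2" ALPN, filtered cipher suites).

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// File paths are placeholders.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		log.Fatal(err)
	}
	caPEM, err := os.ReadFile("ca.crt")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("failed to append CA certificates")
	}
	creds := credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert}, // client identity (mTLS)
		RootCAs:      pool,                    // verifies the server
	})
	cc, err := grpc.NewClient("server.example.com:443",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
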
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
deleted file mode 100644
index 405a2ffeb..000000000
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "net"
- "net/url"
- "time"
-
- "google.golang.org/grpc/backoff"
- "google.golang.org/grpc/channelz"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/insecure"
- "google.golang.org/grpc/internal"
- internalbackoff "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/binarylog"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/stats"
-)
-
-const (
- // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
- defaultMaxCallAttempts = 5
-)
-
-func init() {
- internal.AddGlobalDialOptions = func(opt ...DialOption) {
- globalDialOptions = append(globalDialOptions, opt...)
- }
- internal.ClearGlobalDialOptions = func() {
- globalDialOptions = nil
- }
- internal.AddGlobalPerTargetDialOptions = func(opt any) {
- if ptdo, ok := opt.(perTargetDialOption); ok {
- globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
- }
- }
- internal.ClearGlobalPerTargetDialOptions = func() {
- globalPerTargetDialOptions = nil
- }
- internal.WithBinaryLogger = withBinaryLogger
- internal.JoinDialOptions = newJoinDialOption
- internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
- internal.WithBufferPool = withBufferPool
-}
-
-// dialOptions configure a Dial call. dialOptions are set by the DialOption
-// values passed to Dial.
-type dialOptions struct {
- unaryInt UnaryClientInterceptor
- streamInt StreamClientInterceptor
-
- chainUnaryInts []UnaryClientInterceptor
- chainStreamInts []StreamClientInterceptor
-
- compressorV0 Compressor
- dc Decompressor
- bs internalbackoff.Strategy
- block bool
- returnLastError bool
- timeout time.Duration
- authority string
- binaryLogger binarylog.Logger
- copts transport.ConnectOptions
- callOptions []CallOption
- channelzParent channelz.Identifier
- disableServiceConfig bool
- disableRetry bool
- disableHealthCheck bool
- minConnectTimeout func() time.Duration
- defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
- defaultServiceConfigRawJSON *string
- resolvers []resolver.Builder
- idleTimeout time.Duration
- defaultScheme string
- maxCallAttempts int
- enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled.
- useProxy bool // Specifies if a server should be connected via proxy.
-}
-
-// DialOption configures how we set up the connection.
-type DialOption interface {
- apply(*dialOptions)
-}
-
-var globalDialOptions []DialOption
-
-// perTargetDialOption takes a parsed target and returns a dial option to apply.
-//
-// This gets called after NewClient() parses the target, and allows per-target
-// configuration to be set through a returned DialOption. The DialOption will
-// not take effect if it specifies a resolver builder, as that DialOption is
-// factored in while parsing the target.
-type perTargetDialOption interface {
- // DialOption returns a Dial Option to apply.
- DialOptionForTarget(parsedTarget url.URL) DialOption
-}
-
-var globalPerTargetDialOptions []perTargetDialOption
-
-// EmptyDialOption does not alter the dial configuration. It can be embedded in
-// another structure to build custom dial options.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type EmptyDialOption struct{}
-
-func (EmptyDialOption) apply(*dialOptions) {}
-
-type disableGlobalDialOptions struct{}
-
-func (disableGlobalDialOptions) apply(*dialOptions) {}
-
-// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
-// from applying the global DialOptions (set via AddGlobalDialOptions).
-func newDisableGlobalDialOptions() DialOption {
- return &disableGlobalDialOptions{}
-}
-
-// funcDialOption wraps a function that modifies dialOptions into an
-// implementation of the DialOption interface.
-type funcDialOption struct {
- f func(*dialOptions)
-}
-
-func (fdo *funcDialOption) apply(do *dialOptions) {
- fdo.f(do)
-}
-
-func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
- return &funcDialOption{
- f: f,
- }
-}
-
-type joinDialOption struct {
- opts []DialOption
-}
-
-func (jdo *joinDialOption) apply(do *dialOptions) {
- for _, opt := range jdo.opts {
- opt.apply(do)
- }
-}
-
-func newJoinDialOption(opts ...DialOption) DialOption {
- return &joinDialOption{opts: opts}
-}
-
-// WithSharedWriteBuffer allows reusing per-connection transport write buffer.
-// If this option is set to true every connection will release the buffer after
-// flushing the data on the wire.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithSharedWriteBuffer(val bool) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.SharedWriteBuffer = val
- })
-}
-
-// WithWriteBufferSize determines how much data can be batched before doing a
-// write on the wire. The default value for this buffer is 32KB.
-//
-// Zero or negative values will disable the write buffer, so that each write
-// goes directly to the underlying connection. Note: a Send call may not
-// directly translate to a write.
-func WithWriteBufferSize(s int) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.WriteBufferSize = s
- })
-}
-
-// WithReadBufferSize lets you set the size of the read buffer, which
-// determines how much data can be read at most for each read syscall.
-//
-// The default value for this buffer is 32KB. Zero or negative values will
-// disable the read buffer for a connection, so the data framer can access the
-// underlying conn directly.
-func WithReadBufferSize(s int) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.ReadBufferSize = s
- })
-}
-
-// WithInitialWindowSize returns a DialOption which sets the value for initial
-// window size on a stream. The lower bound for window size is 64K and any value
-// smaller than that will be ignored.
-func WithInitialWindowSize(s int32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.InitialWindowSize = s
- })
-}
-
-// WithInitialConnWindowSize returns a DialOption which sets the value for
-// initial window size on a connection. The lower bound for window size is 64K
-// and any value smaller than that will be ignored.
-func WithInitialConnWindowSize(s int32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.InitialConnWindowSize = s
- })
-}
-
-// WithMaxMsgSize returns a DialOption which sets the maximum message size the
-// client can receive.
-//
-// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will
-// be supported throughout 1.x.
-func WithMaxMsgSize(s int) DialOption {
- return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
-}
-
-// WithDefaultCallOptions returns a DialOption which sets the default
-// CallOptions for calls over the connection.
-func WithDefaultCallOptions(cos ...CallOption) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.callOptions = append(o.callOptions, cos...)
- })
-}
-
-// WithCodec returns a DialOption which sets a codec for message marshaling and
-// unmarshaling.
-//
-// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be
-// supported throughout 1.x.
-func WithCodec(c Codec) DialOption {
- return WithDefaultCallOptions(CallCustomCodec(c))
-}
-
-// WithCompressor returns a DialOption which sets a Compressor to use for
-// message compression. It has lower priority than the compressor set by the
-// UseCompressor CallOption.
-//
-// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
-func WithCompressor(cp Compressor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.compressorV0 = cp
- })
-}
-
-// WithDecompressor returns a DialOption which sets a Decompressor to use for
-// incoming message decompression. If incoming response messages are encoded
-// using the decompressor's Type(), it will be used. Otherwise, the message
-// encoding will be used to look up the compressor registered via
-// encoding.RegisterCompressor, which will then be used to decompress the
-// message. If no compressor is registered for the encoding, an Unimplemented
-// status error will be returned.
-//
-// Deprecated: use encoding.RegisterCompressor instead. Will be supported
-// throughout 1.x.
-func WithDecompressor(dc Decompressor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.dc = dc
- })
-}
-
-// WithConnectParams configures the ClientConn to use the provided ConnectParams
-// for creating and maintaining connections to servers.
-//
-// The backoff configuration specified as part of the ConnectParams overrides
-// all defaults specified in
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
-// using the backoff.DefaultConfig as a base, in cases where you want to
-// override only a subset of the backoff configuration.
-func WithConnectParams(p ConnectParams) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.bs = internalbackoff.Exponential{Config: p.Backoff}
- o.minConnectTimeout = func() time.Duration {
- return p.MinConnectTimeout
- }
- })
-}
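
A sketch of WithConnectParams following the comment above: start from backoff.DefaultConfig and override only what is needed. Values and target are illustrative.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Use backoff.DefaultConfig as the base and override only MaxDelay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
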
-
-// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
-// when backing off after failed connection attempts.
-//
-// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
-func WithBackoffMaxDelay(md time.Duration) DialOption {
- return WithBackoffConfig(BackoffConfig{MaxDelay: md})
-}
-
-// WithBackoffConfig configures the dialer to use the provided backoff
-// parameters after connection failures.
-//
-// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
-func WithBackoffConfig(b BackoffConfig) DialOption {
- bc := backoff.DefaultConfig
- bc.MaxDelay = b.MaxDelay
- return withBackoff(internalbackoff.Exponential{Config: bc})
-}
-
-// withBackoff sets the backoff strategy used for connectRetryNum after a failed
-// connection attempt.
-//
-// This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs internalbackoff.Strategy) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.bs = bs
- })
-}
-
-// WithBlock returns a DialOption which makes callers of Dial block until the
-// underlying connection is up. Without this, Dial returns immediately and
-// connecting to the server happens in the background.
-//
-// Use of this feature is not recommended. For more information, please see:
-// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
-//
-// Deprecated: this DialOption is not supported by NewClient.
-// Will be supported throughout 1.x.
-func WithBlock() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.block = true
- })
-}
-
-// WithReturnConnectionError returns a DialOption which makes the client connection
-// return a string containing both the last connection error that occurred and
-// the context.DeadlineExceeded error.
-// It implies WithBlock().
-//
-// Use of this feature is not recommended. For more information, please see:
-// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
-//
-// Deprecated: this DialOption is not supported by NewClient.
-// Will be supported throughout 1.x.
-func WithReturnConnectionError() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.block = true
- o.returnLastError = true
- })
-}
-
-// WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Under the hood, it uses insecure.NewCredentials().
-//
-// Note that using this DialOption with per-RPC credentials (through
-// WithCredentialsBundle or WithPerRPCCredentials) which require transport
-// security is incompatible and will cause grpc.Dial() to fail.
-//
-// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
-// instead. Will be supported throughout 1.x.
-func WithInsecure() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.TransportCredentials = insecure.NewCredentials()
- })
-}
-
-// WithNoProxy returns a DialOption which disables the use of proxies for this
-// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithNoProxy() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.useProxy = false
- })
-}
-
-// WithLocalDNSResolution forces local DNS name resolution even when a proxy is
-// specified in the environment. By default, the server name is provided
-// directly to the proxy as part of the CONNECT handshake. This is ignored if
-// WithNoProxy is used.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithLocalDNSResolution() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.enableLocalDNSResolution = true
- })
-}
-
-// WithTransportCredentials returns a DialOption which configures connection-level
-// security credentials (e.g., TLS/SSL). This should not be used together
-// with WithCredentialsBundle.
-func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.TransportCredentials = creds
- })
-}
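
For reference, a minimal sketch of how WithTransportCredentials is typically used, assuming a placeholder target and the host's system root CAs:

```go
package main

import (
	"crypto/x509"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	pool, err := x509.SystemCertPool() // trust the OS root CAs
	if err != nil {
		log.Fatal(err)
	}
	// "example.com:443" is a placeholder target.
	conn, err := grpc.NewClient("example.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(pool, "")))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```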
-
-// WithPerRPCCredentials returns a DialOption which sets credentials and places
-// auth state on each outbound RPC.
-func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
- })
-}
-
-// WithCredentialsBundle returns a DialOption to set a credentials bundle for
-// the ClientConn. This should not be used together with
-// WithTransportCredentials.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithCredentialsBundle(b credentials.Bundle) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.CredsBundle = b
- })
-}
-
-// WithTimeout returns a DialOption that configures a timeout for dialing a
-// ClientConn initially. This is valid if and only if WithBlock() is present.
-//
-// Deprecated: this DialOption is not supported by NewClient.
-// Will be supported throughout 1.x.
-func WithTimeout(d time.Duration) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.timeout = d
- })
-}
-
-// WithContextDialer returns a DialOption that sets a dialer to create
-// connections. If FailOnNonTempDialError() is set to true, and an error is
-// returned by f, gRPC checks the error's Temporary() method to decide if it
-// should try to reconnect to the network address.
-//
-// Note that gRPC by default performs name resolution on the target passed to
-// NewClient. To bypass name resolution and cause the target string to be
-// passed directly to the dialer here instead, use the "passthrough" resolver
-// by specifying it in the target string, e.g. "passthrough:target".
-//
-// Note: All supported releases of Go (as of December 2023) override the OS
-// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
-// with OS defaults for keepalive time and interval, use a net.Dialer that sets
-// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
-// option to true from the Control field. For a concrete example of how to do
-// this, see internal.NetDialerWithTCPKeepalive().
-//
-// For more information, please see [issue 23459] in the Go GitHub repo.
-//
-// [issue 23459]: https://github.com/golang/go/issues/23459
-func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.Dialer = f
- })
-}
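
A sketch of pairing WithContextDialer with the "passthrough" resolver described above, so the raw target string reaches the dialer; the Unix socket path is a placeholder:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("passthrough:///ignored",
		grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
			var d net.Dialer
			// Route every connection to a local Unix socket instead of TCP.
			return d.DialContext(ctx, "unix", "/tmp/app.sock")
		}),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```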
-
-// WithDialer returns a DialOption that specifies a function to use for dialing
-// network addresses. If FailOnNonTempDialError() is set to true, and an error
-// is returned by f, gRPC checks the error's Temporary() method to decide if it
-// should try to reconnect to the network address.
-//
-// Deprecated: use WithContextDialer instead. Will be supported throughout
-// 1.x.
-func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
- return WithContextDialer(
- func(ctx context.Context, addr string) (net.Conn, error) {
- if deadline, ok := ctx.Deadline(); ok {
- return f(addr, time.Until(deadline))
- }
- return f(addr, 0)
- })
-}
-
-// WithStatsHandler returns a DialOption that specifies the stats handler for
-// all the RPCs and underlying network connections in this ClientConn.
-func WithStatsHandler(h stats.Handler) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- if h == nil {
- logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
- // Do not allow a nil stats handler, which would otherwise cause
- // panics.
- return
- }
- o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
- })
-}
-
-// withBinaryLogger returns a DialOption that specifies the binary logger for
-// this ClientConn.
-func withBinaryLogger(bl binarylog.Logger) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.binaryLogger = bl
- })
-}
-
-// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
-// non-temporary dial errors. If f is true, and the dialer returns a non-temporary
-// error, gRPC will fail the connection to the network address and won't try to
-// reconnect. The default value of FailOnNonTempDialError is false.
-//
-// FailOnNonTempDialError only affects the initial dial, and does not do
-// anything useful unless you are also using WithBlock().
-//
-// Use of this feature is not recommended. For more information, please see:
-// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
-//
-// Deprecated: this DialOption is not supported by NewClient.
-// This API may be changed or removed in a
-// later release.
-func FailOnNonTempDialError(f bool) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.FailOnNonTempDialError = f
- })
-}
-
-// WithUserAgent returns a DialOption that specifies a user agent string for all
-// the RPCs.
-func WithUserAgent(s string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.UserAgent = s + " " + grpcUA
- })
-}
-
-// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
-// for the client transport.
-//
-// Keepalive is disabled by default.
-func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
- if kp.Time < internal.KeepaliveMinPingTime {
- logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
- kp.Time = internal.KeepaliveMinPingTime
- }
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.KeepaliveParams = kp
- })
-}
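
A sketch of enabling client-side keepalive; the durations are illustrative, and values more aggressive than the server's enforcement policy can cause the server to close the connection:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// kaOption enables periodic pings on the channel.
var kaOption = grpc.WithKeepaliveParams(keepalive.ClientParameters{
	Time:                30 * time.Second, // ping after 30s without activity
	Timeout:             10 * time.Second, // wait 10s for the ping ack
	PermitWithoutStream: false,            // only ping while RPCs are in flight
})
```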
-
-// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
-// unary RPCs.
-func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.unaryInt = f
- })
-}
-
-// WithChainUnaryInterceptor returns a DialOption that specifies the chained
-// interceptor for unary RPCs. The first interceptor will be the outermost,
-// while the last interceptor will be the innermost wrapper around the real call.
-// All interceptors added by this method will be chained, and the interceptor
-// defined by WithUnaryInterceptor will always be prepended to the chain.
-func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
- })
-}
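
A sketch of a chained unary interceptor: `timing` is a hypothetical latency logger whose signature satisfies grpc.UnaryClientInterceptor, installed outermost-first via WithChainUnaryInterceptor:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// timing logs how long each unary call took before handing back the result.
func timing(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
	return err
}

// Usage: grpc.WithChainUnaryInterceptor(timing /*, auth, metrics, ... */)
```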
-
-// WithStreamInterceptor returns a DialOption that specifies the interceptor for
-// streaming RPCs.
-func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.streamInt = f
- })
-}
-
-// WithChainStreamInterceptor returns a DialOption that specifies the chained
-// interceptor for streaming RPCs. The first interceptor will be the outermost,
-// while the last interceptor will be the innermost wrapper around the real call.
-// All interceptors added by this method will be chained, and the interceptor
-// defined by WithStreamInterceptor will always be prepended to the chain.
-func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.chainStreamInts = append(o.chainStreamInts, interceptors...)
- })
-}
-
-// WithAuthority returns a DialOption that specifies the value to be used as the
-// :authority pseudo-header and as the server name in the authentication handshake.
-func WithAuthority(a string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.authority = a
- })
-}
-
-// WithChannelzParentID returns a DialOption that specifies the channelz ID of
-// current ClientConn's parent. This function is used in nested channel creation
-// (e.g. grpclb dial).
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithChannelzParentID(c channelz.Identifier) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.channelzParent = c
- })
-}
-
-// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
-// service config provided by the resolver and provides a hint to the resolver
-// to not fetch service configs.
-//
-// Note that this dial option only disables service configs from the resolver.
-// If a default service config is provided, gRPC will use it.
-func WithDisableServiceConfig() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.disableServiceConfig = true
- })
-}
-
-// WithDefaultServiceConfig returns a DialOption that configures the default
-// service config, which will be used in cases where:
-//
-// 1. WithDisableServiceConfig is also used, or
-//
-// 2. The name resolver does not provide a service config or provides an
-// invalid service config.
-//
-// The parameter s is the JSON representation of the default service config.
-// For more information about service configs, see:
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-// For a simple example of usage, see:
-// examples/features/load_balancing/client/main.go
-func WithDefaultServiceConfig(s string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.defaultServiceConfigRawJSON = &s
- })
-}
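
A sketch of a default service config as raw JSON; the service name and retry numbers are illustrative, not recommendations:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

const defaultSC = `{
  "methodConfig": [{
    "name": [{"service": "example.Echo"}],
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	// Used only when the resolver supplies no (or an invalid) service config.
	conn, err := grpc.NewClient("dns:///example.com:50051",
		grpc.WithDefaultServiceConfig(defaultSC),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```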
-
-// WithDisableRetry returns a DialOption that disables retries, even if the
-// service config enables them. This does not impact transparent retries, which
-// will happen automatically if no data is written to the wire or if the RPC is
-// unprocessed by the remote server.
-func WithDisableRetry() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.disableRetry = true
- })
-}
-
-// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
-// (uncompressed) size of header list that the client is prepared to accept.
-type MaxHeaderListSizeDialOption struct {
- MaxHeaderListSize uint32
-}
-
-func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
- do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
-}
-
-// WithMaxHeaderListSize returns a DialOption that specifies the maximum
-// (uncompressed) size of header list that the client is prepared to accept.
-func WithMaxHeaderListSize(s uint32) DialOption {
- return MaxHeaderListSizeDialOption{
- MaxHeaderListSize: s,
- }
-}
-
-// WithDisableHealthCheck disables the LB channel health checking for all
-// SubConns of this ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithDisableHealthCheck() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.disableHealthCheck = true
- })
-}
-
-func defaultDialOptions() dialOptions {
- return dialOptions{
- copts: transport.ConnectOptions{
- ReadBufferSize: defaultReadBufSize,
- WriteBufferSize: defaultWriteBufSize,
- UserAgent: grpcUA,
- BufferPool: mem.DefaultBufferPool(),
- },
- bs: internalbackoff.DefaultExponential,
- idleTimeout: 30 * time.Minute,
- defaultScheme: "dns",
- maxCallAttempts: defaultMaxCallAttempts,
- useProxy: true,
- enableLocalDNSResolution: false,
- }
-}
-
-// withMinConnectDeadline specifies the function that clientconn uses to
-// get minConnectDeadline. This can be used to make connection attempts happen
-// faster/slower.
-//
-// For testing purposes only.
-func withMinConnectDeadline(f func() time.Duration) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.minConnectTimeout = f
- })
-}
-
-// withDefaultScheme is used to allow Dial to use "passthrough" as the default
-// name resolver, while NewClient uses "dns".
-func withDefaultScheme(s string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.defaultScheme = s
- })
-}
-
-// WithResolvers allows a list of resolver implementations to be registered
-// locally with the ClientConn without needing to be globally registered via
-// resolver.Register. They will be matched against the scheme used for the
-// current Dial only, and will take precedence over the global registry.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithResolvers(rs ...resolver.Builder) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.resolvers = append(o.resolvers, rs...)
- })
-}
-
-// WithIdleTimeout returns a DialOption that configures an idle timeout for the
-// channel. If the channel is idle for the configured timeout, i.e. there are no
-// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
-// and as a result the name resolver and load balancer will be shut down. The
-// channel will exit idle mode when the Connect() method is called or when an
-// RPC is initiated.
-//
-// A default timeout of 30 minutes will be used if this dial option is not set
-// at dial time, and idleness can be disabled by passing a timeout of zero.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithIdleTimeout(d time.Duration) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.idleTimeout = d
- })
-}
-
-// WithMaxCallAttempts returns a DialOption that configures the maximum number
-// of attempts per call (including retries and hedging) using the channel.
-// Service owners may specify a higher value for these parameters, but higher
-// values will be treated as equal to the maximum value by the client
-// implementation. This mitigates security concerns related to the service
-// config being transferred to the client via DNS.
-//
-// A value of 5 will be used if this dial option is not set or n < 2.
-func WithMaxCallAttempts(n int) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- if n < 2 {
- n = defaultMaxCallAttempts
- }
- o.maxCallAttempts = n
- })
-}
-
-func withBufferPool(bufferPool mem.BufferPool) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.BufferPool = bufferPool
- })
-}
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
deleted file mode 100644
index e7b532b6f..000000000
--- a/vendor/google.golang.org/grpc/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-//go:generate ./scripts/regenerate.sh
-
-/*
-Package grpc implements an RPC system called gRPC.
-
-See grpc.io for more information about gRPC.
-*/
-package grpc // import "google.golang.org/grpc"
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
deleted file mode 100644
index 11d0ae142..000000000
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package encoding defines the interface for the compressor and codec, and
-// functions to register and retrieve compressors and codecs.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package encoding
-
-import (
- "io"
- "strings"
-
- "google.golang.org/grpc/internal/grpcutil"
-)
-
-// Identity specifies the optional encoding for uncompressed streams.
-// It is intended for grpc internal use only.
-const Identity = "identity"
-
-// Compressor is used for compressing and decompressing when sending or
-// receiving messages.
-//
-// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
-// gRPC will invoke it to determine the size of the buffer allocated for the
-// result of decompression. A return value of -1 indicates unknown size.
-type Compressor interface {
- // Compress returns an io.WriteCloser; data written to it is compressed
- // and written to w. If an error occurs while initializing the compressor,
- // that error is returned instead.
- Compress(w io.Writer) (io.WriteCloser, error)
- // Decompress reads data from r, decompresses it, and provides the
- // uncompressed data via the returned io.Reader. If an error occurs while
- // initializing the decompressor, that error is returned instead.
- Decompress(r io.Reader) (io.Reader, error)
- // Name is the name of the compression codec and is used to set the content
- // coding header. The result must be static; the result cannot change
- // between calls.
- Name() string
-}
-
-var registeredCompressor = make(map[string]Compressor)
-
-// RegisterCompressor registers the compressor with gRPC by its name. It can
-// be activated when sending an RPC via grpc.UseCompressor(). It will be
-// used automatically when receiving a message, based on the content-coding
-// header. Servers also use it to send a response with the same encoding as
-// the request.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
-// registered with the same name, the one registered last will take effect.
-func RegisterCompressor(c Compressor) {
- registeredCompressor[c.Name()] = c
- if !grpcutil.IsCompressorNameRegistered(c.Name()) {
- grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
- }
-}
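
The registration pattern in practice: the stock gzip compressor registers itself in init(), so a blank import is enough, and compression is then opted into per call. A sketch with a hypothetical method name:

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // init() registers the "gzip" Compressor
)

// callCompressed invokes a hypothetical method with gzip enabled for this
// call only; the receiving side picks the compressor from the header.
func callCompressed(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
	return conn.Invoke(ctx, "/example.Echo/UnaryEcho", req, reply,
		grpc.UseCompressor("gzip"))
}
```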
-
-// GetCompressor returns Compressor for the given compressor name.
-func GetCompressor(name string) Compressor {
- return registeredCompressor[name]
-}
-
-// Codec defines the interface gRPC uses to encode and decode messages. Note
-// that implementations of this interface must be thread safe; a Codec's
-// methods can be called from concurrent goroutines.
-type Codec interface {
- // Marshal returns the wire format of v.
- Marshal(v any) ([]byte, error)
- // Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v any) error
- // Name returns the name of the Codec implementation. The returned string
- // will be used as part of content type in transmission. The result must be
- // static; the result cannot change between calls.
- Name() string
-}
-
-var registeredCodecs = make(map[string]any)
-
-// RegisterCodec registers the provided Codec for use with all gRPC clients and
-// servers.
-//
-// The Codec will be stored and looked up by result of its Name() method, which
-// should match the content-subtype of the encoding handled by the Codec. This
-// is case-insensitive, and is stored and looked up as lowercase. If the
-// result of calling Name() is an empty string, RegisterCodec will panic. See
-// Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Codecs are
-// registered with the same name, the one registered last will take effect.
-func RegisterCodec(codec Codec) {
- if codec == nil {
- panic("cannot register a nil Codec")
- }
- if codec.Name() == "" {
- panic("cannot register Codec with empty string result for Name()")
- }
- contentSubtype := strings.ToLower(codec.Name())
- registeredCodecs[contentSubtype] = codec
-}
-
-// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
-// registered for the content-subtype.
-//
-// The content-subtype is expected to be lowercase.
-func GetCodec(contentSubtype string) Codec {
- c, _ := registeredCodecs[contentSubtype].(Codec)
- return c
-}
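
A sketch of a custom codec following the rules above; the "json" name and package are hypothetical:

```go
// Package jsoncodec registers a hypothetical "json" codec.
package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

type codec struct{}

func (codec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (codec) Name() string                       { return "json" } // stored lowercase

func init() {
	encoding.RegisterCodec(codec{}) // init-time only, per the NOTE above
}
```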
diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
deleted file mode 100644
index 074c5e234..000000000
--- a/vendor/google.golang.org/grpc/encoding/encoding_v2.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package encoding
-
-import (
- "strings"
-
- "google.golang.org/grpc/mem"
-)
-
-// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
-// that implementations of this interface must be thread safe; a CodecV2's
-// methods can be called from concurrent goroutines.
-type CodecV2 interface {
- // Marshal returns the wire format of v. The buffers in the returned
- // [mem.BufferSlice] must have at least one reference each, which will be freed
- // by gRPC when they are no longer needed.
- Marshal(v any) (out mem.BufferSlice, err error)
- // Unmarshal parses the wire format into v. Note that data will be freed as soon
- // as this function returns. If the codec wishes to guarantee access to the data
- // after this function, it must take its own reference that it frees when it is
- // no longer needed.
- Unmarshal(data mem.BufferSlice, v any) error
- // Name returns the name of the Codec implementation. The returned string
- // will be used as part of content type in transmission. The result must be
- // static; the result cannot change between calls.
- Name() string
-}
-
-// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
-// servers.
-//
-// The CodecV2 will be stored and looked up by result of its Name() method, which
-// should match the content-subtype of the encoding handled by the CodecV2. This
-// is case-insensitive, and is stored and looked up as lowercase. If the
-// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
-// Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If both a Codec and CodecV2 are registered with the same name, the CodecV2
-// will be used.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Codecs are
-// registered with the same name, the one registered last will take effect.
-func RegisterCodecV2(codec CodecV2) {
- if codec == nil {
- panic("cannot register a nil CodecV2")
- }
- if codec.Name() == "" {
- panic("cannot register CodecV2 with empty string result for Name()")
- }
- contentSubtype := strings.ToLower(codec.Name())
- registeredCodecs[contentSubtype] = codec
-}
-
-// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
-// registered for the content-subtype.
-//
-// The content-subtype is expected to be lowercase.
-func GetCodecV2(contentSubtype string) CodecV2 {
- c, _ := registeredCodecs[contentSubtype].(CodecV2)
- return c
-}
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
deleted file mode 100644
index ceec319dd..000000000
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package proto defines the protobuf codec. Importing this package will
-// register the codec.
-package proto
-
-import (
- "fmt"
-
- "google.golang.org/grpc/encoding"
- "google.golang.org/grpc/mem"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/protoadapt"
-)
-
-// Name is the name registered for the proto codec.
-const Name = "proto"
-
-func init() {
- encoding.RegisterCodecV2(&codecV2{})
-}
-
-// codecV2 is a CodecV2 implementation backed by protobuf. It is the default
-// codec for gRPC.
-type codecV2 struct{}
-
-func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
- vv := messageV2Of(v)
- if vv == nil {
- return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
- }
-
- size := proto.Size(vv)
- if mem.IsBelowBufferPoolingThreshold(size) {
- buf, err := proto.Marshal(vv)
- if err != nil {
- return nil, err
- }
- data = append(data, mem.SliceBuffer(buf))
- } else {
- pool := mem.DefaultBufferPool()
- buf := pool.Get(size)
- if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
- pool.Put(buf)
- return nil, err
- }
- data = append(data, mem.NewBuffer(buf, pool))
- }
-
- return data, nil
-}
-
-func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
- vv := messageV2Of(v)
- if vv == nil {
- return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
- }
-
- buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
- defer buf.Free()
- // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
- // really possible without a major overhaul of the proto package, but the
- // vtprotobuf library may be able to support this.
- return proto.Unmarshal(buf.ReadOnlyData(), vv)
-}
-
-func messageV2Of(v any) proto.Message {
- switch v := v.(type) {
- case protoadapt.MessageV1:
- return protoadapt.MessageV2Of(v)
- case protoadapt.MessageV2:
- return v
- }
-
- return nil
-}
-
-func (c *codecV2) Name() string {
- return Name
-}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
deleted file mode 100644
index ad75313a1..000000000
--- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package stats
-
-import (
- "maps"
-
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/stats"
-)
-
-func init() {
- internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
-}
-
-var logger = grpclog.Component("metrics-registry")
-
-// DefaultMetrics are the default metrics registered through the global metrics
-// registry. This is written to at initialization time only, and is read-only
-// after initialization.
-var DefaultMetrics = stats.NewMetricSet()
-
-// MetricDescriptor is the data for a registered metric.
-type MetricDescriptor struct {
- // The name of this metric. This name must be unique across the whole binary
- // (including any per call metrics). See
- // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
- // for metric naming conventions.
- Name string
- // The description of this metric.
- Description string
- // The unit (e.g. entries, seconds) of this metric.
- Unit string
- // The required label keys for this metric. These are intended to be
- // attached to metrics emitted from a stats handler.
- Labels []string
- // The optional label keys for this metric. These are intended to be
- // attached to metrics emitted from a stats handler if configured.
- OptionalLabels []string
- // Whether this metric is on by default.
- Default bool
- // The type of metric. This is set by the metric registry, and not intended
- // to be set by a component registering a metric.
- Type MetricType
- // Bounds are the bounds of this metric. This only applies to histogram
- // metrics. If unset or set with length 0, stats handlers will fall back to
- // default bounds.
- Bounds []float64
-}
-
-// MetricType is the type of metric.
-type MetricType int
-
-// Type of metric supported by this instrument registry.
-const (
- MetricTypeIntCount MetricType = iota
- MetricTypeFloatCount
- MetricTypeIntHisto
- MetricTypeFloatHisto
- MetricTypeIntGauge
-)
-
-// Int64CountHandle is a typed handle for an int count metric. This handle
-// is passed at the recording point in order to know which metric to record
-// on.
-type Int64CountHandle MetricDescriptor
-
-// Descriptor returns the int64 count handle typecast to a pointer to a
-// MetricDescriptor.
-func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
- return (*MetricDescriptor)(h)
-}
-
-// Record records the int64 count value on the metrics recorder provided.
-func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
- recorder.RecordInt64Count(h, incr, labels...)
-}
-
-// Float64CountHandle is a typed handle for a float count metric. This handle is
-// passed at the recording point in order to know which metric to record on.
-type Float64CountHandle MetricDescriptor
-
-// Descriptor returns the float64 count handle typecast to a pointer to a
-// MetricDescriptor.
-func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
- return (*MetricDescriptor)(h)
-}
-
-// Record records the float64 count value on the metrics recorder provided.
-func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
- recorder.RecordFloat64Count(h, incr, labels...)
-}
-
-// Int64HistoHandle is a typed handle for an int histogram metric. This handle
-// is passed at the recording point in order to know which metric to record on.
-type Int64HistoHandle MetricDescriptor
-
-// Descriptor returns the int64 histo handle typecast to a pointer to a
-// MetricDescriptor.
-func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
- return (*MetricDescriptor)(h)
-}
-
-// Record records the int64 histo value on the metrics recorder provided.
-func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
- recorder.RecordInt64Histo(h, incr, labels...)
-}
-
-// Float64HistoHandle is a typed handle for a float histogram metric. This
-// handle is passed at the recording point in order to know which metric to
-// record on.
-type Float64HistoHandle MetricDescriptor
-
-// Descriptor returns the float64 histo handle typecast to a pointer to a
-// MetricDescriptor.
-func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
- return (*MetricDescriptor)(h)
-}
-
-// Record records the float64 histo value on the metrics recorder provided.
-func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
- recorder.RecordFloat64Histo(h, incr, labels...)
-}
-
-// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
-// passed at the recording point in order to know which metric to record on.
-type Int64GaugeHandle MetricDescriptor
-
-// Descriptor returns the int64 gauge handle typecast to a pointer to a
-// MetricDescriptor.
-func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
- return (*MetricDescriptor)(h)
-}
-
-// Record records the int64 gauge value on the metrics recorder provided.
-func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
- recorder.RecordInt64Gauge(h, incr, labels...)
-}
-
-// registeredMetrics are the registered metric descriptor names.
-var registeredMetrics = make(map[string]bool)
-
-// metricsRegistry contains all of the registered metrics.
-//
-// This is written to only at init time, and read only after that.
-var metricsRegistry = make(map[string]*MetricDescriptor)
-
-// DescriptorForMetric returns the MetricDescriptor from the global registry.
-//
-// Returns nil if the MetricDescriptor is not present.
-func DescriptorForMetric(metricName string) *MetricDescriptor {
- return metricsRegistry[metricName]
-}
-
-func registerMetric(metricName string, def bool) {
- if registeredMetrics[metricName] {
- logger.Fatalf("metric %v already registered", metricName)
- }
- registeredMetrics[metricName] = true
- if def {
- DefaultMetrics = DefaultMetrics.Add(metricName)
- }
-}
-
-// RegisterInt64Count registers the metric descriptor onto the global registry.
-// It returns a typed handle to use when recording data.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple metrics are
-// registered with the same name, this function will panic.
-func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
- registerMetric(descriptor.Name, descriptor.Default)
- descriptor.Type = MetricTypeIntCount
- descPtr := &descriptor
- metricsRegistry[descriptor.Name] = descPtr
- return (*Int64CountHandle)(descPtr)
-}
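
A sketch of registering and recording a counter; the metric name, unit, and label are illustrative, and the package-level var runs at initialization time as required:

```go
package example

import estats "google.golang.org/grpc/experimental/stats"

var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.picks", // hypothetical, unique across the binary
	Description: "Number of picker picks performed.",
	Unit:        "{pick}",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// At a recording point, given a MetricsRecorder rec:
//
//	pickCount.Record(rec, 1, "dns:///example")
```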
-
-// RegisterFloat64Count registers the metric descriptor onto the global
-// registry. It returns a typed handle to use when recording data.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple metrics are
-// registered with the same name, this function will panic.
-func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
- registerMetric(descriptor.Name, descriptor.Default)
- descriptor.Type = MetricTypeFloatCount
- descPtr := &descriptor
- metricsRegistry[descriptor.Name] = descPtr
- return (*Float64CountHandle)(descPtr)
-}
-
-// RegisterInt64Histo registers the metric descriptor onto the global registry.
-// It returns a typed handle to use when recording data.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple metrics are
-// registered with the same name, this function will panic.
-func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
- registerMetric(descriptor.Name, descriptor.Default)
- descriptor.Type = MetricTypeIntHisto
- descPtr := &descriptor
- metricsRegistry[descriptor.Name] = descPtr
- return (*Int64HistoHandle)(descPtr)
-}
-
-// RegisterFloat64Histo registers the metric descriptor onto the global
-// registry. It returns a typed handle to use when recording data.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple metrics are
-// registered with the same name, this function will panic.
-func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
- registerMetric(descriptor.Name, descriptor.Default)
- descriptor.Type = MetricTypeFloatHisto
- descPtr := &descriptor
- metricsRegistry[descriptor.Name] = descPtr
- return (*Float64HistoHandle)(descPtr)
-}
-
-// RegisterInt64Gauge registers the metric descriptor onto the global registry.
-// It returns a typed handle to use when recording data.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple metrics are
-// registered with the same name, this function will panic.
-func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
- registerMetric(descriptor.Name, descriptor.Default)
- descriptor.Type = MetricTypeIntGauge
- descPtr := &descriptor
- metricsRegistry[descriptor.Name] = descPtr
- return (*Int64GaugeHandle)(descPtr)
-}
-
-// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
-// registry. Returns a cleanup function that sets the metrics registry to its
-// original state.
-func snapshotMetricsRegistryForTesting() func() {
- oldDefaultMetrics := DefaultMetrics
- oldRegisteredMetrics := registeredMetrics
- oldMetricsRegistry := metricsRegistry
-
- registeredMetrics = make(map[string]bool)
- metricsRegistry = make(map[string]*MetricDescriptor)
- // Seed the fresh maps with the prior contents; copying a map onto
- // itself would be a no-op.
- maps.Copy(registeredMetrics, oldRegisteredMetrics)
- maps.Copy(metricsRegistry, oldMetricsRegistry)
-
- return func() {
- DefaultMetrics = oldDefaultMetrics
- registeredMetrics = oldRegisteredMetrics
- metricsRegistry = oldMetricsRegistry
- }
-}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
deleted file mode 100644
index ee1423605..000000000
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package stats contains experimental metrics/stats APIs.
-package stats
-
-import "google.golang.org/grpc/stats"
-
-// MetricsRecorder records measurements on metrics derived from the metric registry.
-type MetricsRecorder interface {
- // RecordInt64Count records the measurement alongside labels on the int
- // count associated with the provided handle.
- RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
- // RecordFloat64Count records the measurement alongside labels on the float
- // count associated with the provided handle.
- RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
- // RecordInt64Histo records the measurement alongside labels on the int
- // histo associated with the provided handle.
- RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
- // RecordFloat64Histo records the measurement alongside labels on the float
- // histo associated with the provided handle.
- RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
- // RecordInt64Gauge records the measurement alongside labels on the int
- // gauge associated with the provided handle.
- RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
-}
-
-// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
-// Metrics will be deleted in a future release.
-type Metrics = stats.MetricSet
-
-// Metric was replaced by direct usage of strings.
-type Metric = string
-
-// NewMetrics is an experimental legacy alias of the now-stable
-// stats.NewMetricSet. NewMetrics will be deleted in a future release.
-func NewMetrics(metrics ...Metric) *Metrics {
- return stats.NewMetricSet(metrics...)
-}
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
deleted file mode 100644
index f1ae080dc..000000000
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpclog
-
-import (
- "fmt"
-)
-
-// componentData records the settings for a component.
-type componentData struct {
- name string
-}
-
-var cache = map[string]*componentData{}
-
-func (c *componentData) InfoDepth(depth int, args ...any) {
- args = append([]any{"[" + c.name + "]"}, args...)
- InfoDepth(depth+1, args...)
-}
-
-func (c *componentData) WarningDepth(depth int, args ...any) {
- args = append([]any{"[" + c.name + "]"}, args...)
- WarningDepth(depth+1, args...)
-}
-
-func (c *componentData) ErrorDepth(depth int, args ...any) {
- args = append([]any{"[" + c.name + "]"}, args...)
- ErrorDepth(depth+1, args...)
-}
-
-func (c *componentData) FatalDepth(depth int, args ...any) {
- args = append([]any{"[" + c.name + "]"}, args...)
- FatalDepth(depth+1, args...)
-}
-
-func (c *componentData) Info(args ...any) {
- c.InfoDepth(1, args...)
-}
-
-func (c *componentData) Warning(args ...any) {
- c.WarningDepth(1, args...)
-}
-
-func (c *componentData) Error(args ...any) {
- c.ErrorDepth(1, args...)
-}
-
-func (c *componentData) Fatal(args ...any) {
- c.FatalDepth(1, args...)
-}
-
-func (c *componentData) Infof(format string, args ...any) {
- c.InfoDepth(1, fmt.Sprintf(format, args...))
-}
-
-func (c *componentData) Warningf(format string, args ...any) {
- c.WarningDepth(1, fmt.Sprintf(format, args...))
-}
-
-func (c *componentData) Errorf(format string, args ...any) {
- c.ErrorDepth(1, fmt.Sprintf(format, args...))
-}
-
-func (c *componentData) Fatalf(format string, args ...any) {
- c.FatalDepth(1, fmt.Sprintf(format, args...))
-}
-
-func (c *componentData) Infoln(args ...any) {
- c.InfoDepth(1, args...)
-}
-
-func (c *componentData) Warningln(args ...any) {
- c.WarningDepth(1, args...)
-}
-
-func (c *componentData) Errorln(args ...any) {
- c.ErrorDepth(1, args...)
-}
-
-func (c *componentData) Fatalln(args ...any) {
- c.FatalDepth(1, args...)
-}
-
-func (c *componentData) V(l int) bool {
- return V(l)
-}
-
-// Component creates a new component and returns it for logging. If a component
-// with the name already exists, nothing new is created and the existing one is
-// returned. SetLoggerV2 will panic if it is called with a logger created by
-// Component.
-func Component(componentName string) DepthLoggerV2 {
- if cData, ok := cache[componentName]; ok {
- return cData
- }
- c := &componentData{componentName}
- cache[componentName] = c
- return c
-}
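
A sketch of typical Component usage; the component name is illustrative:

```go
package transport

import "google.golang.org/grpc/grpclog"

// logger prefixes every line with the component name, e.g. "[transport]".
var logger = grpclog.Component("transport")

func onHandshake(addr string) {
	logger.Infof("handshake complete with %s", addr)
}
```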
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
deleted file mode 100644
index db320105e..000000000
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpclog defines logging for grpc.
-//
-// In the default logger, the severity level can be set by the environment
-// variable GRPC_GO_LOG_SEVERITY_LEVEL, and the verbosity level by
-// GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog
-
-import (
- "os"
-
- "google.golang.org/grpc/grpclog/internal"
-)
-
-func init() {
- SetLoggerV2(newLoggerV2())
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func V(l int) bool {
- return internal.LoggerV2Impl.V(l)
-}
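
A sketch of gating expensive log construction on V; with the default logger, the threshold comes from GRPC_GO_LOG_VERBOSITY_LEVEL:

```go
package main

import "google.golang.org/grpc/grpclog"

// expensiveDump stands in for costly message construction.
func expensiveDump() string { return "..." }

func debugLog() {
	// The message is built only when verbosity is 2 or higher.
	if grpclog.V(2) {
		grpclog.Infof("resolver state: %s", expensiveDump())
	}
}
```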
-
-// Info logs to the INFO log.
-func Info(args ...any) {
- internal.LoggerV2Impl.Info(args...)
-}
-
-// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
-func Infof(format string, args ...any) {
- internal.LoggerV2Impl.Infof(format, args...)
-}
-
-// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
-func Infoln(args ...any) {
- internal.LoggerV2Impl.Infoln(args...)
-}
-
-// Warning logs to the WARNING log.
-func Warning(args ...any) {
- internal.LoggerV2Impl.Warning(args...)
-}
-
-// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
-func Warningf(format string, args ...any) {
- internal.LoggerV2Impl.Warningf(format, args...)
-}
-
-// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
-func Warningln(args ...any) {
- internal.LoggerV2Impl.Warningln(args...)
-}
-
-// Error logs to the ERROR log.
-func Error(args ...any) {
- internal.LoggerV2Impl.Error(args...)
-}
-
-// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...any) {
- internal.LoggerV2Impl.Errorf(format, args...)
-}
-
-// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...any) {
- internal.LoggerV2Impl.Errorln(args...)
-}
-
-// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
-// It calls os.Exit() with exit code 1.
-func Fatal(args ...any) {
- internal.LoggerV2Impl.Fatal(args...)
- // Make sure fatal logs will exit.
- os.Exit(1)
-}
-
-// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
-// It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...any) {
- internal.LoggerV2Impl.Fatalf(format, args...)
- // Make sure fatal logs will exit.
- os.Exit(1)
-}
-
-// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calls os.Exit() with exit code 1.
-func Fatalln(args ...any) {
- internal.LoggerV2Impl.Fatalln(args...)
- // Make sure fatal logs will exit.
- os.Exit(1)
-}
-
-// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
-//
-// Deprecated: use Info.
-func Print(args ...any) {
- internal.LoggerV2Impl.Info(args...)
-}
-
-// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
-//
-// Deprecated: use Infof.
-func Printf(format string, args ...any) {
- internal.LoggerV2Impl.Infof(format, args...)
-}
-
-// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
-//
-// Deprecated: use Infoln.
-func Println(args ...any) {
- internal.LoggerV2Impl.Infoln(args...)
-}
-
-// InfoDepth logs to the INFO log at the specified depth.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func InfoDepth(depth int, args ...any) {
- if internal.DepthLoggerV2Impl != nil {
- internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
- } else {
- internal.LoggerV2Impl.Infoln(args...)
- }
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WarningDepth(depth int, args ...any) {
- if internal.DepthLoggerV2Impl != nil {
- internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
- } else {
- internal.LoggerV2Impl.Warningln(args...)
- }
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ErrorDepth(depth int, args ...any) {
- if internal.DepthLoggerV2Impl != nil {
- internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
- } else {
- internal.LoggerV2Impl.Errorln(args...)
- }
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func FatalDepth(depth int, args ...any) {
- if internal.DepthLoggerV2Impl != nil {
- internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
- } else {
- internal.LoggerV2Impl.Fatalln(args...)
- }
- os.Exit(1)
-}
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
deleted file mode 100644
index 59c03bc14..000000000
--- a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains functionality internal to the grpclog package.
-package internal
-
-// LoggerV2Impl is the logger used for the non-depth log functions.
-var LoggerV2Impl LoggerV2
-
-// DepthLoggerV2Impl is the logger used for the depth log functions.
-var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
deleted file mode 100644
index e524fdd40..000000000
--- a/vendor/google.golang.org/grpc/grpclog/internal/logger.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-// Logger mimics Go's standard log.Logger as an interface.
-//
-// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...any)
- Fatalf(format string, args ...any)
- Fatalln(args ...any)
- Print(args ...any)
- Printf(format string, args ...any)
- Println(args ...any)
-}
-
-// LoggerWrapper wraps Logger into a LoggerV2.
-type LoggerWrapper struct {
- Logger
-}
-
-// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
-func (l *LoggerWrapper) Info(args ...any) {
- l.Logger.Print(args...)
-}
-
-// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
-func (l *LoggerWrapper) Infoln(args ...any) {
- l.Logger.Println(args...)
-}
-
-// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
-func (l *LoggerWrapper) Infof(format string, args ...any) {
- l.Logger.Printf(format, args...)
-}
-
-// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
-func (l *LoggerWrapper) Warning(args ...any) {
- l.Logger.Print(args...)
-}
-
-// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
-func (l *LoggerWrapper) Warningln(args ...any) {
- l.Logger.Println(args...)
-}
-
-// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
-func (l *LoggerWrapper) Warningf(format string, args ...any) {
- l.Logger.Printf(format, args...)
-}
-
-// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
-func (l *LoggerWrapper) Error(args ...any) {
- l.Logger.Print(args...)
-}
-
-// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
-func (l *LoggerWrapper) Errorln(args ...any) {
- l.Logger.Println(args...)
-}
-
-// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
-func (l *LoggerWrapper) Errorf(format string, args ...any) {
- l.Logger.Printf(format, args...)
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func (*LoggerWrapper) V(int) bool {
- // Returns true for all verbosity levels.
- return true
-}
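
For completeness, applications swap loggers through the exported grpclog API rather than this internal package; a sketch with illustrative writer choices:

```go
package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Info and warnings to stdout, errors to stderr; must run before any
	// gRPC activity to avoid racing with the default logger.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stdout, os.Stderr))
}
```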
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
deleted file mode 100644
index ed90060c3..000000000
--- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "log"
- "os"
-)
-
-// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...any)
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...any)
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...any)
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...any)
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...any)
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...any)
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...any)
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...any)
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...any)
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...any)
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...any)
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...any)
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
-
-// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
-// DepthLoggerV2, the below functions will be called with the appropriate stack
-// depth set for trivial functions the logger may ignore.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type DepthLoggerV2 interface {
- LoggerV2
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
- InfoDepth(depth int, args ...any)
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
- WarningDepth(depth int, args ...any)
- // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
- ErrorDepth(depth int, args ...any)
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
- FatalDepth(depth int, args ...any)
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// sprintf, sprint, and sprintln alias the corresponding fmt functions. These
-// vars exist to make it possible to test that expensive format calls aren't
-// made unnecessarily.
-var (
-	sprintf  = fmt.Sprintf
-	sprint   = fmt.Sprint
-	sprintln = fmt.Sprintln
-)
-
-// exit is os.Exit.
-// This var exists to make it possible to test functions calling os.Exit.
-var exit = os.Exit
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
- jsonFormat bool
-}
-
-func (g *loggerT) output(severity int, s string) {
- sevStr := severityName[severity]
- if !g.jsonFormat {
- g.m[severity].Output(2, sevStr+": "+s)
- return
- }
- // TODO: we can also include the logging component, but that needs more
- // (API) changes.
- b, _ := json.Marshal(map[string]string{
- "severity": sevStr,
- "message": s,
- })
- g.m[severity].Output(2, string(b))
-}
-
-func (g *loggerT) printf(severity int, format string, args ...any) {
- // Note the discard check is duplicated in each print func, rather than in
- // output, to avoid the expensive Sprint calls.
- // De-duplicating this by moving to output would be a significant performance regression!
- if lg := g.m[severity]; lg.Writer() == io.Discard {
- return
- }
- g.output(severity, sprintf(format, args...))
-}
-
-func (g *loggerT) print(severity int, v ...any) {
- if lg := g.m[severity]; lg.Writer() == io.Discard {
- return
- }
- g.output(severity, sprint(v...))
-}
-
-func (g *loggerT) println(severity int, v ...any) {
- if lg := g.m[severity]; lg.Writer() == io.Discard {
- return
- }
- g.output(severity, sprintln(v...))
-}
-
-func (g *loggerT) Info(args ...any) {
- g.print(infoLog, args...)
-}
-
-func (g *loggerT) Infoln(args ...any) {
- g.println(infoLog, args...)
-}
-
-func (g *loggerT) Infof(format string, args ...any) {
- g.printf(infoLog, format, args...)
-}
-
-func (g *loggerT) Warning(args ...any) {
- g.print(warningLog, args...)
-}
-
-func (g *loggerT) Warningln(args ...any) {
- g.println(warningLog, args...)
-}
-
-func (g *loggerT) Warningf(format string, args ...any) {
- g.printf(warningLog, format, args...)
-}
-
-func (g *loggerT) Error(args ...any) {
- g.print(errorLog, args...)
-}
-
-func (g *loggerT) Errorln(args ...any) {
- g.println(errorLog, args...)
-}
-
-func (g *loggerT) Errorf(format string, args ...any) {
- g.printf(errorLog, format, args...)
-}
-
-func (g *loggerT) Fatal(args ...any) {
- g.print(fatalLog, args...)
- exit(1)
-}
-
-func (g *loggerT) Fatalln(args ...any) {
- g.println(fatalLog, args...)
- exit(1)
-}
-
-func (g *loggerT) Fatalf(format string, args ...any) {
- g.printf(fatalLog, format, args...)
- exit(1)
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
-}
-
-// LoggerV2Config configures the LoggerV2 implementation.
-type LoggerV2Config struct {
- // Verbosity sets the verbosity level of the logger.
- Verbosity int
- // FormatJSON controls whether the logger should output logs in JSON format.
- FormatJSON bool
-}
-
-// combineLoggers returns a writer that feeds both the lower and higher
-// severity logs, or just one of them if the other is io.Discard.
-//
-// This uses io.Discard instead of io.MultiWriter when all loggers
-// are set to io.Discard. Both this package and the standard log package have
-// significant optimizations for io.Discard, which io.MultiWriter lacks (as of
-// this writing).
-func combineLoggers(lower, higher io.Writer) io.Writer {
- if lower == io.Discard {
- return higher
- }
- if higher == io.Discard {
- return lower
- }
- return io.MultiWriter(lower, higher)
-}
-
-// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
-// The infoW, warningW, and errorW writers are used to write log messages of
-// different severity levels.
-func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
- flag := log.LstdFlags
- if c.FormatJSON {
- flag = 0
- }
-
- warningW = combineLoggers(infoW, warningW)
- errorW = combineLoggers(errorW, warningW)
-
- fatalW := errorW
-
- m := []*log.Logger{
- log.New(infoW, "", flag),
- log.New(warningW, "", flag),
- log.New(errorW, "", flag),
- log.New(fatalW, "", flag),
- }
- return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
-}
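
The writer fan-out performed by combineLoggers above is easy to misread, so here is a minimal sketch of its observable behavior through the public grpclog constructors (this internal package itself is not importable); the buffers and the message are illustrative only:

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/grpc/grpclog"
)

func main() {
	var infoBuf, warnBuf, errBuf bytes.Buffer
	l := grpclog.NewLoggerV2(&infoBuf, &warnBuf, &errBuf)

	// A WARNING is fanned out to the warning and info writers, but not to
	// the error writer; an ERROR would reach all three.
	l.Warning("disk nearly full")
	fmt.Println(infoBuf.Len() > 0, warnBuf.Len() > 0, errBuf.Len() > 0) // true true false
}
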
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
deleted file mode 100644
index 4b2035857..000000000
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpclog
-
-import "google.golang.org/grpc/grpclog/internal"
-
-// Logger mimics golang's standard Logger as an interface.
-//
-// Deprecated: use LoggerV2.
-type Logger internal.Logger
-
-// SetLogger sets the logger that is used in grpc. Call only from
-// init() functions.
-//
-// Deprecated: use SetLoggerV2.
-func SetLogger(l Logger) {
- internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
-}
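
For completeness, a hedged sketch of the legacy path this file supports: the standard library's *log.Logger already satisfies the deprecated Logger interface (Print/Printf/Println/Fatal/Fatalf/Fatalln), so it can be installed directly; SetLoggerV2 remains the preferred route.

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Deprecated path: every message logged through this wrapper is treated
	// as ERROR severity by the LoggerWrapper adapter.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}

func main() {}
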
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
deleted file mode 100644
index 892dc13d1..000000000
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpclog
-
-import (
- "io"
- "os"
- "strconv"
- "strings"
-
- "google.golang.org/grpc/grpclog/internal"
-)
-
-// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 internal.LoggerV2
-
-// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
-// Not mutex-protected, should be called before any gRPC functions.
-func SetLoggerV2(l LoggerV2) {
- if _, ok := l.(*componentData); ok {
- panic("cannot use component logger as grpclog logger")
- }
- internal.LoggerV2Impl = l
- internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
-}
-
-// NewLoggerV2 creates a loggerV2 with the provided writers.
-// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
-// Error logs will be written to errorW, warningW and infoW.
-// Warning logs will be written to warningW and infoW.
-// Info logs will be written to infoW.
-func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
-}
-
-// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
-// verbosity level.
-func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
-}
-
-// newLoggerV2 creates a loggerV2 to be used as the default logger. Logs at or
-// above the severity selected via GRPC_GO_LOG_SEVERITY_LEVEL (ERROR if unset)
-// are written to stderr; lower severities are discarded.
-func newLoggerV2() LoggerV2 {
- errorW := io.Discard
- warningW := io.Discard
- infoW := io.Discard
-
- logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
- switch logLevel {
- case "", "ERROR", "error": // If env is unset, set level to ERROR.
- errorW = os.Stderr
- case "WARNING", "warning":
- warningW = os.Stderr
- case "INFO", "info":
- infoW = os.Stderr
- }
-
- var v int
- vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
- if vl, err := strconv.Atoi(vLevel); err == nil {
- v = vl
- }
-
- jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
-
- return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
- Verbosity: v,
- FormatJSON: jsonFormat,
- })
-}
-
-// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
-// DepthLoggerV2, the below functions will be called with the appropriate stack
-// depth set for trivial functions the logger may ignore.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type DepthLoggerV2 internal.DepthLoggerV2
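
As a rough usage sketch of the constructors and environment variables above (the verbosity value 2 is arbitrary), the calls below approximate GRPC_GO_LOG_SEVERITY_LEVEL=info with GRPC_GO_LOG_VERBOSITY_LEVEL=2; JSON output, by contrast, is reachable only via GRPC_GO_LOG_FORMATTER=json in this snapshot.

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Roughly equivalent to GRPC_GO_LOG_SEVERITY_LEVEL=info plus
	// GRPC_GO_LOG_VERBOSITY_LEVEL=2, set before the process starts.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))

	// grpclog.V gates verbose output the same way loggerT.V does above.
	if grpclog.V(2) {
		grpclog.Info("verbose diagnostics enabled")
	}
}
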
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
deleted file mode 100644
index 94177b05c..000000000
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2015 The gRPC Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The canonical version of this proto can be found at
-// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.36.4
-// protoc v5.27.1
-// source: grpc/health/v1/health.proto
-
-package grpc_health_v1
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
- unsafe "unsafe"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type HealthCheckResponse_ServingStatus int32
-
-const (
- HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
- HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
- HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
- HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method.
-)
-
-// Enum value maps for HealthCheckResponse_ServingStatus.
-var (
- HealthCheckResponse_ServingStatus_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "SERVING",
- 2: "NOT_SERVING",
- 3: "SERVICE_UNKNOWN",
- }
- HealthCheckResponse_ServingStatus_value = map[string]int32{
- "UNKNOWN": 0,
- "SERVING": 1,
- "NOT_SERVING": 2,
- "SERVICE_UNKNOWN": 3,
- }
-)
-
-func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus {
- p := new(HealthCheckResponse_ServingStatus)
- *p = x
- return p
-}
-
-func (x HealthCheckResponse_ServingStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor()
-}
-
-func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType {
- return &file_grpc_health_v1_health_proto_enumTypes[0]
-}
-
-func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead.
-func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
- return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0}
-}
-
-type HealthCheckRequest struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *HealthCheckRequest) Reset() {
- *x = HealthCheckRequest{}
- mi := &file_grpc_health_v1_health_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *HealthCheckRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest) ProtoMessage() {}
-
-func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_health_v1_health_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *HealthCheckRequest) GetService() string {
- if x != nil {
- return x.Service
- }
- return ""
-}
-
-type HealthCheckResponse struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *HealthCheckResponse) Reset() {
- *x = HealthCheckResponse{}
- mi := &file_grpc_health_v1_health_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *HealthCheckResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse) ProtoMessage() {}
-
-func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_grpc_health_v1_health_proto_msgTypes[1]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
- if x != nil {
- return x.Status
- }
- return HealthCheckResponse_UNKNOWN
-}
-
-var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
-
-var file_grpc_health_v1_health_proto_rawDesc = string([]byte{
- 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
- 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
- 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
- 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
- 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
- 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
- 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
- 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
- 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
- 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
- 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
- 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
-
-var (
- file_grpc_health_v1_health_proto_rawDescOnce sync.Once
- file_grpc_health_v1_health_proto_rawDescData []byte
-)
-
-func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
- file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
- file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)))
- })
- return file_grpc_health_v1_health_proto_rawDescData
-}
-
-var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_health_v1_health_proto_goTypes = []any{
- (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
- (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
- (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
-}
-var file_grpc_health_v1_health_proto_depIdxs = []int32{
- 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
- 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
- 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
- 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
- 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
- 3, // [3:5] is the sub-list for method output_type
- 1, // [1:3] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_grpc_health_v1_health_proto_init() }
-func file_grpc_health_v1_health_proto_init() {
- if File_grpc_health_v1_health_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)),
- NumEnums: 1,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_grpc_health_v1_health_proto_goTypes,
- DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs,
- EnumInfos: file_grpc_health_v1_health_proto_enumTypes,
- MessageInfos: file_grpc_health_v1_health_proto_msgTypes,
- }.Build()
- File_grpc_health_v1_health_proto = out.File
- file_grpc_health_v1_health_proto_goTypes = nil
- file_grpc_health_v1_health_proto_depIdxs = nil
-}
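
A minimal client-side sketch of the generated Check API above; the address and the empty service name (which asks for the server's overall health) are illustrative assumptions:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// An empty Service name asks for the server's overall health.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("health check: %v", err)
	}
	log.Printf("status: %s", resp.GetStatus())
}
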
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
deleted file mode 100644
index f96b8ab49..000000000
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2015 The gRPC Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The canonical version of this proto can be found at
-// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
-
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.5.1
-// - protoc v5.27.1
-// source: grpc/health/v1/health.proto
-
-package grpc_health_v1
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.64.0 or later.
-const _ = grpc.SupportPackageIsVersion9
-
-const (
- Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
- Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
-)
-
-// HealthClient is the client API for Health service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-//
-// Health is gRPC's mechanism for checking whether a server is able to handle
-// RPCs. Its semantics are documented in
-// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
-type HealthClient interface {
- // Check gets the health of the specified service. If the requested service
- // is unknown, the call will fail with status NOT_FOUND. If the caller does
- // not specify a service name, the server should respond with its overall
- // health status.
- //
- // Clients should set a deadline when calling Check, and can declare the
- // server unhealthy if they do not receive a timely response.
- //
- // Check implementations should be idempotent and side effect free.
- Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
- // Performs a watch for the serving status of the requested service.
- // The server will immediately send back a message indicating the current
- // serving status. It will then subsequently send a new message whenever
- // the service's serving status changes.
- //
- // If the requested service is unknown when the call is received, the
- // server will send a message setting the serving status to
- // SERVICE_UNKNOWN but will *not* terminate the call. If at some
- // future point, the serving status of the service becomes known, the
- // server will send a new message with the service's serving status.
- //
- // If the call terminates with status UNIMPLEMENTED, then clients
- // should assume this method is not supported and should not retry the
- // call. If the call terminates with any other status (including OK),
- // clients should retry the call with appropriate exponential backoff.
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
-}
-
-type healthClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
- return &healthClient{cc}
-}
-
-func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
- if err != nil {
- return nil, err
- }
- x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
-type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
-
-// HealthServer is the server API for Health service.
-// All implementations should embed UnimplementedHealthServer
-// for forward compatibility.
-//
-// Health is gRPC's mechanism for checking whether a server is able to handle
-// RPCs. Its semantics are documented in
-// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
-type HealthServer interface {
- // Check gets the health of the specified service. If the requested service
- // is unknown, the call will fail with status NOT_FOUND. If the caller does
- // not specify a service name, the server should respond with its overall
- // health status.
- //
- // Clients should set a deadline when calling Check, and can declare the
- // server unhealthy if they do not receive a timely response.
- //
- // Check implementations should be idempotent and side effect free.
- Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
- // Performs a watch for the serving status of the requested service.
- // The server will immediately send back a message indicating the current
- // serving status. It will then subsequently send a new message whenever
- // the service's serving status changes.
- //
- // If the requested service is unknown when the call is received, the
- // server will send a message setting the serving status to
- // SERVICE_UNKNOWN but will *not* terminate the call. If at some
- // future point, the serving status of the service becomes known, the
- // server will send a new message with the service's serving status.
- //
- // If the call terminates with status UNIMPLEMENTED, then clients
- // should assume this method is not supported and should not retry the
- // call. If the call terminates with any other status (including OK),
- // clients should retry the call with appropriate exponential backoff.
- Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
-}
-
-// UnimplementedHealthServer should be embedded to have
-// forward compatible implementations.
-//
-// NOTE: this should be embedded by value instead of pointer to avoid a nil
-// pointer dereference when methods are called.
-type UnimplementedHealthServer struct{}
-
-func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
-}
-func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
- return status.Errorf(codes.Unimplemented, "method Watch not implemented")
-}
-func (UnimplementedHealthServer) testEmbeddedByValue() {}
-
-// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to HealthServer will
-// result in compilation errors.
-type UnsafeHealthServer interface {
- mustEmbedUnimplementedHealthServer()
-}
-
-func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
- // If the following call panics, it indicates UnimplementedHealthServer was
- // embedded by pointer and is nil. This will cause panics if an
- // unimplemented method is ever invoked, so we test this at initialization
- // time to prevent it from happening at runtime later due to I/O.
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
- t.testEmbeddedByValue()
- }
- s.RegisterService(&Health_ServiceDesc, srv)
-}
-
-func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HealthCheckRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(HealthServer).Check(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: Health_Check_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(HealthCheckRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
-}
-
-// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
-type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
-
-// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy).
-var Health_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "grpc.health.v1.Health",
- HandlerType: (*HealthServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Check",
- Handler: _Health_Check_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Watch",
- Handler: _Health_Watch_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "grpc/health/v1/health.proto",
-}
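
On the server side, a hedged sketch of wiring this service up, assuming the stock implementation in google.golang.org/grpc/health and a hypothetical service name:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()

	// health.NewServer returns a ready-made HealthServer; Watch streams
	// updates made through SetServingStatus to connected clients.
	hs := health.NewServer()
	healthpb.RegisterHealthServer(s, hs)
	hs.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING)

	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
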
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
deleted file mode 100644
index 877d78fc3..000000000
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
-)
-
-// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
-type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
-
-// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
-// Unary interceptors can be specified as a DialOption, using
-// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
-// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC
-// delegates all unary RPC invocations to the interceptor, and it is the
-// responsibility of the interceptor to call invoker to complete the processing
-// of the RPC.
-//
-// method is the RPC name. req and reply are the corresponding request and
-// response messages. cc is the ClientConn on which the RPC was invoked. invoker
-// is the handler to complete the RPC and it is the responsibility of the
-// interceptor to call it. opts contain all applicable call options, including
-// defaults from the ClientConn as well as per-call options.
-//
-// The returned error must be compatible with the status package.
-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
-
-// Streamer is called by StreamClientInterceptor to create a ClientStream.
-type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
-
-// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
-// interceptors can be specified as a DialOption, using WithStreamInterceptor()
-// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream
-// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations
-// to the interceptor, and it is the responsibility of the interceptor to call
-// streamer.
-//
-// desc contains a description of the stream. cc is the ClientConn on which the
-// RPC was invoked. streamer is the handler to create a ClientStream and it is
-// the responsibility of the interceptor to call it. opts contain all applicable
-// call options, including defaults from the ClientConn as well as per-call
-// options.
-//
-// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
-// operations. The returned error must be compatible with the status package.
-type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
-
-// UnaryServerInfo consists of various information about a unary RPC on
-// server side. All per-rpc information may be mutated by the interceptor.
-type UnaryServerInfo struct {
- // Server is the service implementation the user provides. This is read-only.
- Server any
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
-}
-
-// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC.
-//
-// If a UnaryHandler returns an error, it should either be produced by the
-// status package, or be one of the context errors. Otherwise, gRPC will use
-// codes.Unknown as the status code and err.Error() as the status message of the
-// RPC.
-type UnaryHandler func(ctx context.Context, req any) (any, error)
-
-// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
-// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
-// of the service method implementation. It is the responsibility of the interceptor to invoke handler
-// to complete the RPC.
-type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
-
-// StreamServerInfo consists of various information about a streaming RPC on
-// server side. All per-rpc information may be mutated by the interceptor.
-type StreamServerInfo struct {
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
- // IsClientStream indicates whether the RPC is a client streaming RPC.
- IsClientStream bool
- // IsServerStream indicates whether the RPC is a server streaming RPC.
- IsServerStream bool
-}
-
-// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
-// info contains all the information of this RPC the interceptor can operate on. And handler is the
-// service method implementation. It is the responsibility of the interceptor to invoke handler to
-// complete the RPC.
-type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
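
A short sketch of the server-side unary contract described above: the interceptor must call handler to complete the RPC, and may observe info.FullMethod and the result. The logging behavior here is illustrative, not prescribed by the package:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingUnaryInterceptor satisfies UnaryServerInterceptor: it invokes handler
// to complete the RPC and returns whatever the handler returns.
func loggingUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("method=%s duration=%s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Install the interceptor when constructing the server.
	s := grpc.NewServer(grpc.UnaryInterceptor(loggingUnaryInterceptor))
	_ = s // register services and call s.Serve(lis) here
}
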
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
deleted file mode 100644
index b6ae7f258..000000000
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package backoff implements the backoff strategy for gRPC.
-//
-// This is kept in internal until the gRPC project decides whether or not to
-// allow alternative backoff strategies.
-package backoff
-
-import (
- "context"
- "errors"
- rand "math/rand/v2"
- "time"
-
- grpcbackoff "google.golang.org/grpc/backoff"
-)
-
-// Strategy defines the methodology for backing off after a grpc connection
-// failure.
-type Strategy interface {
- // Backoff returns the amount of time to wait before the next retry given
- // the number of consecutive failures.
- Backoff(retries int) time.Duration
-}
-
-// DefaultExponential is an exponential backoff implementation using the
-// default values for all the configurable knobs defined in
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
-
-// Exponential implements exponential backoff algorithm as defined in
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-type Exponential struct {
- // Config contains all options to configure the backoff algorithm.
- Config grpcbackoff.Config
-}
-
-// Backoff returns the amount of time to wait before the next retry given the
-// number of retries.
-func (bc Exponential) Backoff(retries int) time.Duration {
- if retries == 0 {
- return bc.Config.BaseDelay
- }
-	backoff, maxDelay := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
-	for backoff < maxDelay && retries > 0 {
-		backoff *= bc.Config.Multiplier
-		retries--
-	}
-	if backoff > maxDelay {
-		backoff = maxDelay
-	}
- // Randomize backoff delays so that if a cluster of requests start at
- // the same time, they won't operate in lockstep.
- backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
- if backoff < 0 {
- return 0
- }
- return time.Duration(backoff)
-}
-
-// ErrResetBackoff is the error to be returned by the function executed by RunF,
-// to instruct the latter to reset its backoff state.
-var ErrResetBackoff = errors.New("reset backoff state")
-
-// RunF provides a convenient way to run a function f repeatedly until the
-// context expires or f returns a non-nil error that is not ErrResetBackoff.
-// When f returns ErrResetBackoff, RunF continues to run f, but resets its
-// backoff state before doing so. backoff accepts an integer representing the
-// number of retries, and returns the amount of time to back off.
-func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
- attempt := 0
- timer := time.NewTimer(0)
- for ctx.Err() == nil {
- select {
- case <-timer.C:
- case <-ctx.Done():
- timer.Stop()
- return
- }
-
- err := f()
- if errors.Is(err, ErrResetBackoff) {
- timer.Reset(0)
- attempt = 0
- continue
- }
- if err != nil {
- return
- }
- timer.Reset(backoff(attempt))
- attempt++
- }
-}
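
The knobs consumed by Exponential above come from the public backoff package; a hedged sketch of tuning them at dial time follows (the target address is hypothetical, and the values shown mirror the documented defaults rather than recommendations):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// These fields mirror the connection-backoff spec linked above.
	bc := backoff.Config{
		BaseDelay:  time.Second,
		Multiplier: 1.6,
		Jitter:     0.2,
		MaxDelay:   2 * time.Minute,
	}
	conn, err := grpc.NewClient("dns:///backend.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: bc, MinConnectTimeout: 20 * time.Second}),
	)
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}
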
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
deleted file mode 100644
index 85540f86a..000000000
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package gracefulswitch
-
-import (
- "encoding/json"
- "fmt"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/serviceconfig"
-)
-
-type lbConfig struct {
- serviceconfig.LoadBalancingConfig
-
- childBuilder balancer.Builder
- childConfig serviceconfig.LoadBalancingConfig
-}
-
-// ChildName returns the name of the child balancer of the gracefulswitch
-// Balancer.
-func ChildName(l serviceconfig.LoadBalancingConfig) string {
- return l.(*lbConfig).childBuilder.Name()
-}
-
-// ParseConfig parses a child config list and returns a LB config for the
-// gracefulswitch Balancer.
-//
-// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
-// names + configs as the format of the "loadBalancingConfig" field in
-// ServiceConfig. It returns a type that should be passed to
-// UpdateClientConnState in the BalancerConfig field.
-func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var lbCfg []map[string]json.RawMessage
- if err := json.Unmarshal(cfg, &lbCfg); err != nil {
- return nil, err
- }
- for i, e := range lbCfg {
- if len(e) != 1 {
- return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i)
- }
-
- var name string
- var jsonCfg json.RawMessage
- for name, jsonCfg = range e {
- }
-
- builder := balancer.Get(name)
- if builder == nil {
- // Skip unregistered balancer names.
- continue
- }
-
- parser, ok := builder.(balancer.ConfigParser)
- if !ok {
- // This is a valid child with no config.
- return &lbConfig{childBuilder: builder}, nil
- }
-
- cfg, err := parser.ParseConfig(jsonCfg)
- if err != nil {
- return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
- }
- return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
- }
-
- return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg))
-}
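
The JSON shape ParseConfig expects is the "loadBalancingConfig" array from a service config; below is a sketch of supplying one through the public API. The target is hypothetical, and the blank import guards the round_robin registration (a no-op if the grpc package already registers it):

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // ensure round_robin is registered
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// ParseConfig scans this array in order, skipping unregistered policy
	// names, and uses the first one it can build.
	const sc = `{"loadBalancingConfig": [{"round_robin": {}}]}`
	conn, err := grpc.NewClient("dns:///backend.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}
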
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
deleted file mode 100644
index fbc1ca356..000000000
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ /dev/null
@@ -1,417 +0,0 @@
-/*
- *
- * Copyright 2022 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package gracefulswitch implements a graceful switch load balancer.
-package gracefulswitch
-
-import (
- "errors"
- "fmt"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/base"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/resolver"
-)
-
-var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
-var _ balancer.Balancer = (*Balancer)(nil)
-
-// NewBalancer returns a graceful switch Balancer.
-func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
- return &Balancer{
- cc: cc,
- bOpts: opts,
- }
-}
-
-// Balancer is a utility to gracefully switch from one balancer to
-// a new balancer. It implements the balancer.Balancer interface.
-type Balancer struct {
- bOpts balancer.BuildOptions
- cc balancer.ClientConn
-
- // mu protects the following fields and all fields within balancerCurrent
- // and balancerPending. mu does not need to be held when calling into the
- // child balancers, as all calls into these children happen only as a direct
- // result of a call into the gracefulSwitchBalancer, which are also
- // guaranteed to be synchronous. There is one exception: an UpdateState call
- // from a child balancer when current and pending are populated can lead to
- // calling Close() on the current. To prevent that racing with an
- // UpdateSubConnState from the channel, we hold currentMu during Close and
- // UpdateSubConnState calls.
- mu sync.Mutex
- balancerCurrent *balancerWrapper
- balancerPending *balancerWrapper
- closed bool // set to true when this balancer is closed
-
- // currentMu must be locked before mu. This mutex guards against this
- // sequence of events: UpdateSubConnState() called, finds the
- // balancerCurrent, gives up lock, updateState comes in, causes Close() on
- // balancerCurrent before the UpdateSubConnState is called on the
- // balancerCurrent.
- currentMu sync.Mutex
-}
-
-// swap swaps out the current lb with the pending lb and updates the ClientConn.
-// The caller must hold gsb.mu.
-func (gsb *Balancer) swap() {
- gsb.cc.UpdateState(gsb.balancerPending.lastState)
- cur := gsb.balancerCurrent
- gsb.balancerCurrent = gsb.balancerPending
- gsb.balancerPending = nil
- go func() {
- gsb.currentMu.Lock()
- defer gsb.currentMu.Unlock()
- cur.Close()
- }()
-}
-
-// balancerCurrentOrPending reports whether bw is the current or pending
-// balancer. The caller must hold gsb.mu.
-func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
- return bw == gsb.balancerCurrent || bw == gsb.balancerPending
-}
-
-// SwitchTo initializes the graceful switch process, which completes based on
-// connectivity state changes on the current/pending balancer. Thus, the switch
-// process is not complete when this method returns. This method must be called
-// synchronously alongside the rest of the balancer.Balancer methods this
-// Graceful Switch Balancer implements.
-//
-// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState
-// to cause the Balancer to automatically change to the new child when necessary.
-func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
- _, err := gsb.switchTo(builder)
- return err
-}
-
-func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) {
- gsb.mu.Lock()
- if gsb.closed {
- gsb.mu.Unlock()
- return nil, errBalancerClosed
- }
- bw := &balancerWrapper{
- ClientConn: gsb.cc,
- builder: builder,
- gsb: gsb,
- lastState: balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
- },
- subconns: make(map[balancer.SubConn]bool),
- }
- balToClose := gsb.balancerPending // nil if there is no pending balancer
- if gsb.balancerCurrent == nil {
- gsb.balancerCurrent = bw
- } else {
- gsb.balancerPending = bw
- }
- gsb.mu.Unlock()
- balToClose.Close()
- // This function takes a builder instead of a balancer because builder.Build
- // can call back inline, and this utility needs to handle the callbacks.
- newBalancer := builder.Build(bw, gsb.bOpts)
- if newBalancer == nil {
- // This is illegal and should never happen; we clear the balancerWrapper
- // we were constructing if it happens to avoid a potential panic.
- gsb.mu.Lock()
- if gsb.balancerPending != nil {
- gsb.balancerPending = nil
- } else {
- gsb.balancerCurrent = nil
- }
- gsb.mu.Unlock()
- return nil, balancer.ErrBadResolverState
- }
-
- // This write doesn't need to take gsb.mu because this field never gets read
- // or written to on any calls from the current or pending. Calls from grpc
- // to this balancer are guaranteed to be called synchronously, so this
- // bw.Balancer field will never be forwarded to until this SwitchTo()
- // function returns.
- bw.Balancer = newBalancer
- return bw, nil
-}
-
-// Returns nil if the graceful switch balancer is closed.
-func (gsb *Balancer) latestBalancer() *balancerWrapper {
- gsb.mu.Lock()
- defer gsb.mu.Unlock()
- if gsb.balancerPending != nil {
- return gsb.balancerPending
- }
- return gsb.balancerCurrent
-}
-
-// UpdateClientConnState forwards the update to the latest balancer created.
-//
-// If the state's BalancerConfig is the config returned by a call to
-// gracefulswitch.ParseConfig, then this function will automatically SwitchTo
-// the balancer indicated by the config before forwarding its config to it, if
-// necessary.
-func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
- // The resolver data is only relevant to the most recent LB Policy.
- balToUpdate := gsb.latestBalancer()
- gsbCfg, ok := state.BalancerConfig.(*lbConfig)
- if ok {
- // Switch to the child in the config unless it is already active.
- if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() {
- var err error
- balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder)
- if err != nil {
- return fmt.Errorf("could not switch to new child balancer: %w", err)
- }
- }
- // Unwrap the child balancer's config.
- state.BalancerConfig = gsbCfg.childConfig
- }
-
- if balToUpdate == nil {
- return errBalancerClosed
- }
-
- // Perform this call without gsb.mu to prevent deadlocks if the child calls
- // back into the channel. The latest balancer can never be closed during a
- // call from the channel, even without gsb.mu held.
- return balToUpdate.UpdateClientConnState(state)
-}
-
-// ResolverError forwards the error to the latest balancer created.
-func (gsb *Balancer) ResolverError(err error) {
- // The resolver data is only relevant to the most recent LB Policy.
- balToUpdate := gsb.latestBalancer()
- if balToUpdate == nil {
- gsb.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: base.NewErrPicker(err),
- })
- return
- }
- // Perform this call without gsb.mu to prevent deadlocks if the child calls
- // back into the channel. The latest balancer can never be closed during a
- // call from the channel, even without gsb.mu held.
- balToUpdate.ResolverError(err)
-}
-
-// ExitIdle forwards the call to the latest balancer created.
-//
-// If the latest balancer does not support ExitIdle, the subConns are
-// re-connected to manually.
-func (gsb *Balancer) ExitIdle() {
- balToUpdate := gsb.latestBalancer()
- if balToUpdate == nil {
- return
- }
- // There is no need to protect this read with a mutex, as the write to the
- // Balancer field happens in SwitchTo, which completes before this can be
- // called.
- if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
- ei.ExitIdle()
- return
- }
- gsb.mu.Lock()
- defer gsb.mu.Unlock()
- for sc := range balToUpdate.subconns {
- sc.Connect()
- }
-}
-
-// updateSubConnState forwards the update to the appropriate child.
-func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
- gsb.currentMu.Lock()
- defer gsb.currentMu.Unlock()
- gsb.mu.Lock()
- // Forward update to the appropriate child. Even if there is a pending
- // balancer, the current balancer should continue to get SubConn updates to
- // maintain the proper state while the pending is still connecting.
- var balToUpdate *balancerWrapper
- if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
- balToUpdate = gsb.balancerCurrent
- } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
- balToUpdate = gsb.balancerPending
- }
- if balToUpdate == nil {
- // SubConn belonged to a stale lb policy that has not yet fully closed,
- // or the balancer was already closed.
- gsb.mu.Unlock()
- return
- }
- if state.ConnectivityState == connectivity.Shutdown {
- delete(balToUpdate.subconns, sc)
- }
- gsb.mu.Unlock()
- if cb != nil {
- cb(state)
- } else {
- balToUpdate.UpdateSubConnState(sc, state)
- }
-}
-
-// UpdateSubConnState forwards the update to the appropriate child.
-func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
- gsb.updateSubConnState(sc, state, nil)
-}
-
-// Close closes any active child balancers.
-func (gsb *Balancer) Close() {
- gsb.mu.Lock()
- gsb.closed = true
- currentBalancerToClose := gsb.balancerCurrent
- gsb.balancerCurrent = nil
- pendingBalancerToClose := gsb.balancerPending
- gsb.balancerPending = nil
- gsb.mu.Unlock()
-
- currentBalancerToClose.Close()
- pendingBalancerToClose.Close()
-}
-
-// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
-// methods to help cleanup SubConns created by the wrapped balancer.
-//
-// It implements the balancer.ClientConn interface and is passed down in that
-// capacity to the wrapped balancer. It maintains a set of subConns created by
-// the wrapped balancer and calls from the latter to create/update/shutdown
-// SubConns update this set before being forwarded to the parent ClientConn.
-// State updates from the wrapped balancer can result in invocation of the
-// graceful switch logic.
-type balancerWrapper struct {
- balancer.ClientConn
- balancer.Balancer
- gsb *Balancer
- builder balancer.Builder
-
- lastState balancer.State
- subconns map[balancer.SubConn]bool // subconns created by this balancer
-}
-
-// Close closes the underlying LB policy and shuts down the subconns it
-// created. bw must not be referenced via balancerCurrent or balancerPending in
-// gsb when called. gsb.mu must not be held. Does not panic with a nil
-// receiver.
-func (bw *balancerWrapper) Close() {
-	// Tolerate a nil receiver so callers may call Close unconditionally.
- if bw == nil {
- return
- }
- // There is no need to protect this read with a mutex, as Close() is
- // impossible to be called concurrently with the write in SwitchTo(). The
- // callsites of Close() for this balancer in Graceful Switch Balancer will
- // never be called until SwitchTo() returns.
- bw.Balancer.Close()
- bw.gsb.mu.Lock()
- for sc := range bw.subconns {
- sc.Shutdown()
- }
- bw.gsb.mu.Unlock()
-}
-
-func (bw *balancerWrapper) UpdateState(state balancer.State) {
-	// Hold the mutex for this entire call to ensure it cannot occur
-	// concurrently with other UpdateState() calls. This causes updates to
-	// lastState and calls to cc.UpdateState to happen atomically.
- bw.gsb.mu.Lock()
- defer bw.gsb.mu.Unlock()
- bw.lastState = state
-
- if !bw.gsb.balancerCurrentOrPending(bw) {
- return
- }
-
- if bw == bw.gsb.balancerCurrent {
-		// If the current balancer exits READY while a pending balancer is
-		// waiting, forward the pending balancer's cached State up to the
-		// ClientConn and swap the pending balancer into place. There is no
-		// reason to gracefully switch from, or keep using, the old policy:
-		// the ClientConn is not connected to any backends.
- if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
- bw.gsb.swap()
- return
- }
-		// Even if there is a pending balancer waiting to be gracefully
-		// switched to, continue to forward current balancer updates to the
-		// ClientConn. Ignoring the state and picker from the current balancer
-		// would cause the system to behave incorrectly from the current LB
-		// policy's perspective. The current LB is also still being used by
-		// gRPC to choose SubConns per RPC, so it should see the most recent
-		// state of the current balancer.
- bw.gsb.cc.UpdateState(state)
- return
- }
- // This method is now dealing with a state update from the pending balancer.
- // If the current balancer is currently in a state other than READY, the new
- // policy can be swapped into place immediately. This is because there is no
- // reason to gracefully switch from and keep using the old policy as the
- // ClientConn is not connected to any backends.
- if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
- bw.gsb.swap()
- }
-}
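
The pending-side condition at the end of UpdateState reduces to a single predicate. A minimal standalone sketch (shouldSwap is a hypothetical name, not part of the gracefulswitch API):

package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// shouldSwap mirrors the rule above: swap once the pending policy has left
// CONNECTING, or earlier if the current policy is no longer READY.
func shouldSwap(pending, current connectivity.State) bool {
	return pending != connectivity.Connecting || current != connectivity.Ready
}

func main() {
	fmt.Println(shouldSwap(connectivity.Ready, connectivity.Ready))      // true: pending done connecting
	fmt.Println(shouldSwap(connectivity.Connecting, connectivity.Ready)) // false: keep using current
}
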
-
-func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- bw.gsb.mu.Lock()
- if !bw.gsb.balancerCurrentOrPending(bw) {
- bw.gsb.mu.Unlock()
- return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
- }
- bw.gsb.mu.Unlock()
-
- var sc balancer.SubConn
- oldListener := opts.StateListener
- opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
- sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
- if err != nil {
- return nil, err
- }
- bw.gsb.mu.Lock()
- if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
- sc.Shutdown()
- bw.gsb.mu.Unlock()
- return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
- }
- bw.subconns[sc] = true
- bw.gsb.mu.Unlock()
- return sc, nil
-}
-
-func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
- // Ignore ResolveNow requests from anything other than the most recent
- // balancer, because older balancers were already removed from the config.
- if bw != bw.gsb.latestBalancer() {
- return
- }
- bw.gsb.cc.ResolveNow(opts)
-}
-
-func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- // Note: existing third party balancers may call this, so it must remain
- // until RemoveSubConn is fully removed.
- sc.Shutdown()
-}
-
-func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
- bw.gsb.mu.Lock()
- if !bw.gsb.balancerCurrentOrPending(bw) {
- bw.gsb.mu.Unlock()
- return
- }
- bw.gsb.mu.Unlock()
- bw.gsb.cc.UpdateAddresses(sc, addrs)
-}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
deleted file mode 100644
index 94a08d687..000000000
--- a/vendor/google.golang.org/grpc/internal/balancerload/load.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Package balancerload defines APIs to parse server loads in trailers. The
-// parsed loads are sent to balancers in DoneInfo.
-package balancerload
-
-import (
- "google.golang.org/grpc/metadata"
-)
-
-// Parser converts loads from metadata into a concrete type.
-type Parser interface {
- // Parse parses loads from metadata.
- Parse(md metadata.MD) any
-}
-
-var parser Parser
-
-// SetParser sets the load parser.
-//
-// Not mutex-protected; it must be called before any gRPC functions are used.
-func SetParser(lr Parser) {
- parser = lr
-}
-
-// Parse calls parser.Parse() if a parser is set, and returns nil otherwise.
-func Parse(md metadata.MD) any {
- if parser == nil {
- return nil
- }
- return parser.Parse(md)
-}
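
A minimal sketch of satisfying this contract, assuming a caller inside the grpc module (the package is internal and cannot be imported from outside); the trailer key "x-server-load" and the headerParser type are hypothetical:

package main

import (
	"google.golang.org/grpc/internal/balancerload"
	"google.golang.org/grpc/metadata"
)

// headerParser is a hypothetical Parser that reads a raw load report from a
// made-up trailer key.
type headerParser struct{}

func (headerParser) Parse(md metadata.MD) any {
	if vals := md.Get("x-server-load"); len(vals) > 0 {
		return vals[0]
	}
	return nil
}

func init() {
	// Per the SetParser contract above, this must run before any gRPC
	// functions are used.
	balancerload.SetParser(headerParser{})
}

func main() {}
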
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
deleted file mode 100644
index 755fdebc1..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package binarylog implements binary logging as defined in
-// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
-package binarylog
-
-import (
- "fmt"
- "os"
-
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/grpcutil"
-)
-
-var grpclogLogger = grpclog.Component("binarylog")
-
-// Logger provides a MethodLogger for each method name; the MethodLogger's
-// Log call takes a context.
-//
-// This is used in the 1.0 release of gcp/observability, and thus must not be
-// deleted or changed.
-type Logger interface {
- GetMethodLogger(methodName string) MethodLogger
-}
-
-// binLogger is the global binary logger for the binary. One instance should
-// be built at init time from the configuration (environment variable or
-// flags).
-//
-// It is used to get a MethodLogger for each individual method.
-var binLogger Logger
-
-// SetLogger sets the binary logger.
-//
-// Only call this at init time.
-func SetLogger(l Logger) {
- binLogger = l
-}
-
-// GetLogger gets the binary logger.
-//
-// Only call this at init time.
-func GetLogger() Logger {
- return binLogger
-}
-
-// GetMethodLogger returns the MethodLogger for the given methodName.
-//
-// methodName should be in the format of "/service/method".
-//
-// Each MethodLogger returned by this method is a new instance. This is to
-// generate sequence id within the call.
-func GetMethodLogger(methodName string) MethodLogger {
- if binLogger == nil {
- return nil
- }
- return binLogger.GetMethodLogger(methodName)
-}
-
-func init() {
- const envStr = "GRPC_BINARY_LOG_FILTER"
- configStr := os.Getenv(envStr)
- binLogger = NewLoggerFromConfigString(configStr)
-}
-
-// MethodLoggerConfig contains the setting for logging behavior of a method
-// logger. Currently, it contains the max length of header and message.
-type MethodLoggerConfig struct {
- // Max length of header and message.
- Header, Message uint64
-}
-
-// LoggerConfig contains the config for loggers to create method loggers.
-type LoggerConfig struct {
- All *MethodLoggerConfig
- Services map[string]*MethodLoggerConfig
- Methods map[string]*MethodLoggerConfig
-
- Blacklist map[string]struct{}
-}
-
-type logger struct {
- config LoggerConfig
-}
-
-// NewLoggerFromConfig builds a logger with the given LoggerConfig.
-func NewLoggerFromConfig(config LoggerConfig) Logger {
- return &logger{config: config}
-}
-
-// newEmptyLogger creates an empty logger. The map fields need to be filled in
-// using the set* functions.
-func newEmptyLogger() *logger {
- return &logger{}
-}
-
-// Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
- if l.config.All != nil {
- return fmt.Errorf("conflicting global rules found")
- }
- l.config.All = ml
- return nil
-}
-
-// Set method logger for "service/*".
-//
-// A second configuration for the same service results in an error.
-func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
- if _, ok := l.config.Services[service]; ok {
- return fmt.Errorf("conflicting service rules for service %v found", service)
- }
- if l.config.Services == nil {
- l.config.Services = make(map[string]*MethodLoggerConfig)
- }
- l.config.Services[service] = ml
- return nil
-}
-
-// Set method logger for "service/method".
-//
-// A second configuration for the same method results in an error.
-func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
- if _, ok := l.config.Blacklist[method]; ok {
- return fmt.Errorf("conflicting blacklist rules for method %v found", method)
- }
- if _, ok := l.config.Methods[method]; ok {
- return fmt.Errorf("conflicting method rules for method %v found", method)
- }
- if l.config.Methods == nil {
- l.config.Methods = make(map[string]*MethodLoggerConfig)
- }
- l.config.Methods[method] = ml
- return nil
-}
-
-// Set blacklist method for "-service/method".
-func (l *logger) setBlacklist(method string) error {
- if _, ok := l.config.Blacklist[method]; ok {
- return fmt.Errorf("conflicting blacklist rules for method %v found", method)
- }
- if _, ok := l.config.Methods[method]; ok {
- return fmt.Errorf("conflicting method rules for method %v found", method)
- }
- if l.config.Blacklist == nil {
- l.config.Blacklist = make(map[string]struct{})
- }
- l.config.Blacklist[method] = struct{}{}
- return nil
-}
-
-// GetMethodLogger returns the MethodLogger for the given methodName.
-//
-// methodName should be in the format of "/service/method".
-//
-// Each MethodLogger returned by this method is a new instance. This is to
-// generate sequence id within the call.
-func (l *logger) GetMethodLogger(methodName string) MethodLogger {
- s, m, err := grpcutil.ParseMethod(methodName)
- if err != nil {
- grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
- return nil
- }
- if ml, ok := l.config.Methods[s+"/"+m]; ok {
- return NewTruncatingMethodLogger(ml.Header, ml.Message)
- }
- if _, ok := l.config.Blacklist[s+"/"+m]; ok {
- return nil
- }
- if ml, ok := l.config.Services[s]; ok {
- return NewTruncatingMethodLogger(ml.Header, ml.Message)
- }
- if l.config.All == nil {
- return nil
- }
- return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
-}
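
The lookup order implemented above is: exact method rule, then blacklist, then service rule, then the "*" default. A sketch of that precedence through the exported constructor, again assuming a caller inside the grpc module since the package is internal:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/binarylog"
)

func main() {
	l := binarylog.NewLoggerFromConfig(binarylog.LoggerConfig{
		All:       &binarylog.MethodLoggerConfig{},
		Services:  map[string]*binarylog.MethodLoggerConfig{"Foo": {Message: 256}},
		Blacklist: map[string]struct{}{"Foo/Bar": {}},
	})
	fmt.Println(l.GetMethodLogger("/Foo/Bar") != nil) // false: blacklisted
	fmt.Println(l.GetMethodLogger("/Foo/Baz") != nil) // true: service rule
	fmt.Println(l.GetMethodLogger("/Other/M") != nil) // true: "*" default
}
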
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
deleted file mode 100644
index 1ee00a39a..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains variables/functions that are exported for testing only.
-//
-// Ideally these would live in a *_test.go file within the binarylog package,
-// but that does not work with staticcheck under Go modules. The error was:
-// "MdToMetadataProto not declared by package binarylog". This may be caused
-// by the way staticcheck looks up files for a package, which doesn't seem to
-// include *_test.go files.
-//
-// Move these to binary_test.go once staticcheck is fixed.
-
-package binarylog
-
-var (
- // AllLogger is a logger that logs all headers/messages for all RPCs. It's
- // for testing only.
- AllLogger = NewLoggerFromConfigString("*")
- // MdToMetadataProto converts metadata to a binary logging proto message.
- // It's for testing only.
- MdToMetadataProto = mdToMetadataProto
- // AddrToProto converts an address to a binary logging proto message. It's
- // for testing only.
- AddrToProto = addrToProto
-)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
deleted file mode 100644
index f9e80e27a..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-// NewLoggerFromConfigString reads the config string and builds a logger. It
-// can be used to build a new logger to be installed via SetLogger.
-//
-// Example filter config strings:
-// - "" Nothing will be logged
-// - "*" All headers and messages will be fully logged.
-// - "*{h}" Only headers will be logged.
-// - "*{m:256}" Only the first 256 bytes of each message will be logged.
-// - "Foo/*" Logs every method in service Foo
-// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
-// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
-// /Foo/Bar, logs all headers and messages in every other method in service
-// Foo.
-//
-// If two configs exist for the same method or service, the config is
-// rejected: parsing fails, a warning is logged, and no logger is built.
-func NewLoggerFromConfigString(s string) Logger {
- if s == "" {
- return nil
- }
- l := newEmptyLogger()
- methods := strings.Split(s, ",")
- for _, method := range methods {
- if err := l.fillMethodLoggerWithConfigString(method); err != nil {
- grpclogLogger.Warningf("failed to parse binary log config: %v", err)
- return nil
- }
- }
- return l
-}
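
A sketch of the config grammar in action, under the same internal-package caveat as above:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/binarylog"
)

func main() {
	// "Foo/*" logs every method of service Foo, "-Foo/Bar" blacklists one
	// method, and "Baz/Qux{m:256}" caps logged messages at 256 bytes.
	l := binarylog.NewLoggerFromConfigString("Foo/*,-Foo/Bar,Baz/Qux{m:256}")
	fmt.Println(l.GetMethodLogger("/Foo/Baz") != nil) // true: service rule
	fmt.Println(l.GetMethodLogger("/Foo/Bar") != nil) // false: blacklisted
}
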
-
-// fillMethodLoggerWithConfigString parses config, creates a
-// MethodLoggerConfig, and adds it to the right map in the logger.
-func (l *logger) fillMethodLoggerWithConfigString(config string) error {
- // "" is invalid.
- if config == "" {
- return errors.New("empty string is not a valid method binary logging config")
- }
-
- // "-service/method", blacklist, no * or {} allowed.
- if config[0] == '-' {
- s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
- if err != nil {
- return fmt.Errorf("invalid config: %q, %v", config, err)
- }
- if m == "*" {
- return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
- }
- if suffix != "" {
- return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
- }
- if err := l.setBlacklist(s + "/" + m); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- return nil
- }
-
- // "*{h:256;m:256}"
- if config[0] == '*' {
- hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
- if err != nil {
- return fmt.Errorf("invalid config: %q, %v", config, err)
- }
- if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- return nil
- }
-
- s, m, suffix, err := parseMethodConfigAndSuffix(config)
- if err != nil {
- return fmt.Errorf("invalid config: %q, %v", config, err)
- }
- hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
- if err != nil {
- return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
- }
- if m == "*" {
- if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- } else {
- if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- }
- return nil
-}
-
-const (
- // TODO: this const is only used by env_config now. But could be useful for
- // other config. Move to binarylog.go if necessary.
- maxUInt = ^uint64(0)
-
- // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
- // expected output.
- longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
-
- // For suffix from above, "{h:123,m:123}". See test for expected output.
- optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
- headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$`
- messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$`
- headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
-)
-
-var (
- longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr)
- headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr)
- messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr)
- headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
-)
-
-// Turn "service/method{h;m}" into "service", "method", "{h;m}".
-func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
- // Regexp result:
- //
- // in: "p.s/m{h:123,m:123}",
- // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
- match := longMethodConfigRegexp.FindStringSubmatch(c)
- if match == nil {
- return "", "", "", fmt.Errorf("%q contains invalid substring", c)
- }
- service = match[1]
- method = match[2]
- suffix = match[3]
- return
-}
-
-// Turn "{h:123;m:345}" into 123, 345.
-//
-// Return maxUInt if length is unspecified.
-func parseHeaderMessageLengthConfig(c string) (hdrLen, msgLen uint64, err error) {
-	if c == "" {
-		return maxUInt, maxUInt, nil
-	}
-	// Header config only.
-	if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
-		if s := match[1]; s != "" {
-			hdrLen, err = strconv.ParseUint(s, 10, 64)
-			if err != nil {
-				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
-			}
-			return hdrLen, 0, nil
-		}
-		return maxUInt, 0, nil
-	}
-
-	// Message config only.
-	if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
-		if s := match[1]; s != "" {
-			msgLen, err = strconv.ParseUint(s, 10, 64)
-			if err != nil {
-				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
-			}
-			return 0, msgLen, nil
-		}
-		return 0, maxUInt, nil
-	}
-
-	// Both header and message configs.
-	if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
-		// Both hdr and msg are specified, but one or both may be empty.
-		hdrLen = maxUInt
-		msgLen = maxUInt
-		if s := match[1]; s != "" {
-			hdrLen, err = strconv.ParseUint(s, 10, 64)
-			if err != nil {
-				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
-			}
-		}
-		if s := match[2]; s != "" {
-			msgLen, err = strconv.ParseUint(s, 10, 64)
-			if err != nil {
-				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
-			}
-		}
-		return hdrLen, msgLen, nil
-	}
-	return 0, 0, fmt.Errorf("%q contains invalid substring", c)
-}
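
The accepted suffix forms and their results can be pinned down with a small table-driven test; a sketch, assuming it lives in the binarylog package so it can reach the unexported function and maxUInt:

package binarylog

import "testing"

// TestParseHeaderMessageLengthConfig pins down the accepted suffix forms.
func TestParseHeaderMessageLengthConfig(t *testing.T) {
	for _, tc := range []struct {
		in       string
		hdr, msg uint64
	}{
		{"", maxUInt, maxUInt},      // unspecified: no limits
		{"{h:123}", 123, 0},         // header limit only
		{"{m:456}", 0, 456},         // message limit only
		{"{h;m:456}", maxUInt, 456}, // header present but unbounded
		{"{h:123;m:456}", 123, 456}, // both bounded
	} {
		hdr, msg, err := parseHeaderMessageLengthConfig(tc.in)
		if err != nil || hdr != tc.hdr || msg != tc.msg {
			t.Errorf("parseHeaderMessageLengthConfig(%q) = %d, %d, %v; want %d, %d, nil", tc.in, hdr, msg, err, tc.hdr, tc.msg)
		}
	}
}
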
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
deleted file mode 100644
index 966932891..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "context"
- "net"
- "strings"
- "sync/atomic"
- "time"
-
- binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/durationpb"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-type callIDGenerator struct {
- id uint64
-}
-
-func (g *callIDGenerator) next() uint64 {
- id := atomic.AddUint64(&g.id, 1)
- return id
-}
-
-// reset is for testing only, and doesn't need to be thread safe.
-func (g *callIDGenerator) reset() {
- g.id = 0
-}
-
-var idGen callIDGenerator
-
-// MethodLogger is the sub-logger for each method.
-//
-// This is used in the 1.0 release of gcp/observability, and thus must not be
-// deleted or changed.
-type MethodLogger interface {
- Log(context.Context, LogEntryConfig)
-}
-
-// TruncatingMethodLogger is a method logger that truncates headers and messages
-// based on configured fields.
-type TruncatingMethodLogger struct {
- headerMaxLen, messageMaxLen uint64
-
- callID uint64
- idWithinCallGen *callIDGenerator
-
- sink Sink // TODO(blog): make this pluggable.
-}
-
-// NewTruncatingMethodLogger returns a new truncating method logger.
-//
-// This is used in the 1.0 release of gcp/observability, and thus must not be
-// deleted or changed.
-func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
- return &TruncatingMethodLogger{
- headerMaxLen: h,
- messageMaxLen: m,
-
- callID: idGen.next(),
- idWithinCallGen: &callIDGenerator{},
-
- sink: DefaultSink, // TODO(blog): make it pluggable.
- }
-}
-
-// Build is an internal-only method for building the proto message out of the
-// input event. It is exported to enable other libraries to reuse as much of
-// the TruncatingMethodLogger logic as possible.
-func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
- m := c.toProto()
- timestamp := timestamppb.Now()
- m.Timestamp = timestamp
- m.CallId = ml.callID
- m.SequenceIdWithinCall = ml.idWithinCallGen.next()
-
- switch pay := m.Payload.(type) {
- case *binlogpb.GrpcLogEntry_ClientHeader:
- m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
- case *binlogpb.GrpcLogEntry_ServerHeader:
- m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
- case *binlogpb.GrpcLogEntry_Message:
- m.PayloadTruncated = ml.truncateMessage(pay.Message)
- }
- return m
-}
-
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
- ml.sink.Write(ml.Build(c))
-}
-
-func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
- if ml.headerMaxLen == maxUInt {
- return false
- }
- var (
- bytesLimit = ml.headerMaxLen
- index int
- )
-	// At the end of the loop, index will be the first entry where the total
-	// size is greater than the limit:
-	//
-	// size(entry[:index]) <= ml.headerMaxLen && size(entry[:index+1]) > ml.headerMaxLen.
- for ; index < len(mdPb.Entry); index++ {
- entry := mdPb.Entry[index]
- if entry.Key == "grpc-trace-bin" {
- // "grpc-trace-bin" is a special key. It's kept in the log entry,
- // but not counted towards the size limit.
- continue
- }
- currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
- if currentEntryLen > bytesLimit {
- break
- }
- bytesLimit -= currentEntryLen
- }
- truncated = index < len(mdPb.Entry)
- mdPb.Entry = mdPb.Entry[:index]
- return truncated
-}
-
-func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
- if ml.messageMaxLen == maxUInt {
- return false
- }
- if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
- return false
- }
- msgPb.Data = msgPb.Data[:ml.messageMaxLen]
- return true
-}
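
A short sketch of the truncation behavior through the exported surface (NewTruncatingMethodLogger and Build), again assuming an in-module caller since the package is internal:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/binarylog"
)

func main() {
	// 64-byte header cap, 8-byte message cap.
	ml := binarylog.NewTruncatingMethodLogger(64, 8)
	e := ml.Build(&binarylog.ClientMessage{Message: []byte("0123456789")})
	fmt.Println(e.GetPayloadTruncated())       // true: 10 bytes > 8-byte cap
	fmt.Println(len(e.GetMessage().GetData())) // 8
}
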
-
-// LogEntryConfig represents the configuration for binary log entry.
-//
-// This is used in the 1.0 release of gcp/observability, and thus must not be
-// deleted or changed.
-type LogEntryConfig interface {
- toProto() *binlogpb.GrpcLogEntry
-}
-
-// ClientHeader configs the binary log entry to be a ClientHeader entry.
-type ClientHeader struct {
- OnClientSide bool
- Header metadata.MD
- MethodName string
- Authority string
- Timeout time.Duration
- // PeerAddr is required only when it's on server side.
- PeerAddr net.Addr
-}
-
-func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
- // This function doesn't need to set all the fields (e.g. seq ID). The Log
- // function will set the fields when necessary.
- clientHeader := &binlogpb.ClientHeader{
- Metadata: mdToMetadataProto(c.Header),
- MethodName: c.MethodName,
- Authority: c.Authority,
- }
- if c.Timeout > 0 {
- clientHeader.Timeout = durationpb.New(c.Timeout)
- }
- ret := &binlogpb.GrpcLogEntry{
- Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
- Payload: &binlogpb.GrpcLogEntry_ClientHeader{
- ClientHeader: clientHeader,
- },
- }
- if c.OnClientSide {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
- }
- if c.PeerAddr != nil {
- ret.Peer = addrToProto(c.PeerAddr)
- }
- return ret
-}
-
-// ServerHeader configs the binary log entry to be a ServerHeader entry.
-type ServerHeader struct {
- OnClientSide bool
- Header metadata.MD
- // PeerAddr is required only when it's on client side.
- PeerAddr net.Addr
-}
-
-func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
- ret := &binlogpb.GrpcLogEntry{
- Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
- Payload: &binlogpb.GrpcLogEntry_ServerHeader{
- ServerHeader: &binlogpb.ServerHeader{
- Metadata: mdToMetadataProto(c.Header),
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
- }
- if c.PeerAddr != nil {
- ret.Peer = addrToProto(c.PeerAddr)
- }
- return ret
-}
-
-// ClientMessage configs the binary log entry to be a ClientMessage entry.
-type ClientMessage struct {
- OnClientSide bool
-	// Message can be a proto.Message or []byte. Other message formats are
-	// not supported.
- Message any
-}
-
-func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
- var (
- data []byte
- err error
- )
- if m, ok := c.Message.(proto.Message); ok {
- data, err = proto.Marshal(m)
- if err != nil {
- grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
- }
- } else if b, ok := c.Message.([]byte); ok {
- data = b
- } else {
- grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
- }
- ret := &binlogpb.GrpcLogEntry{
- Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
- Payload: &binlogpb.GrpcLogEntry_Message{
- Message: &binlogpb.Message{
- Length: uint32(len(data)),
- Data: data,
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// ServerMessage configs the binary log entry to be a ServerMessage entry.
-type ServerMessage struct {
- OnClientSide bool
-	// Message can be a proto.Message or []byte. Other message formats are
-	// not supported.
- Message any
-}
-
-func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
- var (
- data []byte
- err error
- )
- if m, ok := c.Message.(proto.Message); ok {
- data, err = proto.Marshal(m)
- if err != nil {
- grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
- }
- } else if b, ok := c.Message.([]byte); ok {
- data = b
- } else {
- grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
- }
- ret := &binlogpb.GrpcLogEntry{
- Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
- Payload: &binlogpb.GrpcLogEntry_Message{
- Message: &binlogpb.Message{
- Length: uint32(len(data)),
- Data: data,
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
-type ClientHalfClose struct {
- OnClientSide bool
-}
-
-func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
- ret := &binlogpb.GrpcLogEntry{
- Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
- Payload: nil, // No payload here.
- }
- if c.OnClientSide {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
-type ServerTrailer struct {
- OnClientSide bool
- Trailer metadata.MD
- // Err is the status error.
- Err error
- // PeerAddr is required only when it's on client side and the RPC is trailer
- // only.
- PeerAddr net.Addr
-}
-
-func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
- st, ok := status.FromError(c.Err)
- if !ok {
- grpclogLogger.Info("binarylogging: error in trailer is not a status error")
- }
- var (
- detailsBytes []byte
- err error
- )
- stProto := st.Proto()
- if stProto != nil && len(stProto.Details) != 0 {
- detailsBytes, err = proto.Marshal(stProto)
- if err != nil {
- grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
- }
- }
- ret := &binlogpb.GrpcLogEntry{
- Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
- Payload: &binlogpb.GrpcLogEntry_Trailer{
- Trailer: &binlogpb.Trailer{
- Metadata: mdToMetadataProto(c.Trailer),
- StatusCode: uint32(st.Code()),
- StatusMessage: st.Message(),
- StatusDetails: detailsBytes,
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
- }
- if c.PeerAddr != nil {
- ret.Peer = addrToProto(c.PeerAddr)
- }
- return ret
-}
-
-// Cancel configs the binary log entry to be a Cancel entry.
-type Cancel struct {
- OnClientSide bool
-}
-
-func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
- ret := &binlogpb.GrpcLogEntry{
- Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
- Payload: nil,
- }
- if c.OnClientSide {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// metadataKeyOmit returns whether the metadata entry with this key should be
-// omitted.
-func metadataKeyOmit(key string) bool {
- switch key {
- case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
- return true
- case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
- return false
- }
- return strings.HasPrefix(key, "grpc-")
-}
-
-func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
- ret := &binlogpb.Metadata{}
- for k, vv := range md {
- if metadataKeyOmit(k) {
- continue
- }
- for _, v := range vv {
- ret.Entry = append(ret.Entry,
- &binlogpb.MetadataEntry{
- Key: k,
- Value: []byte(v),
- },
- )
- }
- }
- return ret
-}
-
-func addrToProto(addr net.Addr) *binlogpb.Address {
- ret := &binlogpb.Address{}
- switch a := addr.(type) {
- case *net.TCPAddr:
- if a.IP.To4() != nil {
- ret.Type = binlogpb.Address_TYPE_IPV4
- } else if a.IP.To16() != nil {
- ret.Type = binlogpb.Address_TYPE_IPV6
- } else {
- ret.Type = binlogpb.Address_TYPE_UNKNOWN
- // Do not set address and port fields.
- break
- }
- ret.Address = a.IP.String()
- ret.IpPort = uint32(a.Port)
- case *net.UnixAddr:
- ret.Type = binlogpb.Address_TYPE_UNIX
- ret.Address = a.String()
- default:
- ret.Type = binlogpb.Address_TYPE_UNKNOWN
- }
- return ret
-}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
deleted file mode 100644
index 9ea598b14..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "bufio"
- "encoding/binary"
- "io"
- "sync"
- "time"
-
- binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/protobuf/proto"
-)
-
-var (
- // DefaultSink is the sink where the logs will be written to. It's exported
- // for the binarylog package to update.
- DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
-)
-
-// Sink writes log entries into the binary log sink.
-//
-// Sink is a copy of the exported binarylog.Sink, kept here to avoid a
-// circular dependency.
-type Sink interface {
- // Write will be called to write the log entry into the sink.
- //
- // It should be thread-safe so it can be called in parallel.
- Write(*binlogpb.GrpcLogEntry) error
- // Close will be called when the Sink is replaced by a new Sink.
- Close() error
-}
-
-type noopSink struct{}
-
-func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
-func (ns *noopSink) Close() error { return nil }
-
-// newWriterSink creates a binary log sink with the given writer.
-//
-// Write() marshals the proto message and writes it to the given writer. Each
-// message is prefixed with a 4-byte big-endian unsigned integer giving the
-// length.
-//
-// No buffering is performed, and Close() does not close the writer.
-func newWriterSink(w io.Writer) Sink {
- return &writerSink{out: w}
-}
-
-type writerSink struct {
- out io.Writer
-}
-
-func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
- b, err := proto.Marshal(e)
- if err != nil {
- grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
- return err
- }
- hdr := make([]byte, 4)
- binary.BigEndian.PutUint32(hdr, uint32(len(b)))
- if _, err := ws.out.Write(hdr); err != nil {
- return err
- }
- if _, err := ws.out.Write(b); err != nil {
- return err
- }
- return nil
-}
-
-func (ws *writerSink) Close() error { return nil }
-
-type bufferedSink struct {
- mu sync.Mutex
- closer io.Closer
- out Sink // out is built on buf.
- buf *bufio.Writer // buf is kept for flush.
- flusherStarted bool
-
- writeTicker *time.Ticker
- done chan struct{}
-}
-
-func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
- fs.mu.Lock()
- defer fs.mu.Unlock()
- if !fs.flusherStarted {
- // Start the write loop when Write is called.
- fs.startFlushGoroutine()
- fs.flusherStarted = true
- }
-	return fs.out.Write(e)
-}
-
-const (
- bufFlushDuration = 60 * time.Second
-)
-
-func (fs *bufferedSink) startFlushGoroutine() {
- fs.writeTicker = time.NewTicker(bufFlushDuration)
- go func() {
- for {
- select {
- case <-fs.done:
- return
- case <-fs.writeTicker.C:
- }
- fs.mu.Lock()
- if err := fs.buf.Flush(); err != nil {
- grpclogLogger.Warningf("failed to flush to Sink: %v", err)
- }
- fs.mu.Unlock()
- }
- }()
-}
-
-func (fs *bufferedSink) Close() error {
- fs.mu.Lock()
- defer fs.mu.Unlock()
- if fs.writeTicker != nil {
- fs.writeTicker.Stop()
- }
- close(fs.done)
- if err := fs.buf.Flush(); err != nil {
- grpclogLogger.Warningf("failed to flush to Sink: %v", err)
- }
- if err := fs.closer.Close(); err != nil {
- grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
- }
- if err := fs.out.Close(); err != nil {
- grpclogLogger.Warningf("failed to close the Sink: %v", err)
- }
- return nil
-}
-
-// NewBufferedSink creates a binary log sink with the given WriteCloser.
-//
-// Write() marshals the proto message and writes it to the given writer. Each
-// message is prefixed with a 4-byte big-endian unsigned integer giving the
-// length.
-//
-// Content is kept in a buffer, and is flushed every 60 seconds.
-//
-// Close closes the WriteCloser.
-func NewBufferedSink(o io.WriteCloser) Sink {
- bufW := bufio.NewWriter(o)
- return &bufferedSink{
- closer: o,
- out: newWriterSink(bufW),
- buf: bufW,
- done: make(chan struct{}),
- }
-}
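
A usage sketch for the buffered sink, with a hypothetical temp-file destination (in-module caller, since the package is internal):

package main

import (
	"log"
	"os"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/grpc/internal/binarylog"
)

func main() {
	f, err := os.CreateTemp("", "binlog-*.bin")
	if err != nil {
		log.Fatal(err)
	}
	sink := binarylog.NewBufferedSink(f)
	// Each Write marshals the entry and length-prefixes it with a 4-byte
	// big-endian integer; content is flushed every 60 seconds and on Close.
	if err := sink.Write(&binlogpb.GrpcLogEntry{CallId: 1}); err != nil {
		log.Fatal(err)
	}
	if err := sink.Close(); err != nil { // flushes and closes the file
		log.Fatal(err)
	}
}
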
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
deleted file mode 100644
index 11f91668a..000000000
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package buffer provides an implementation of an unbounded buffer.
-package buffer
-
-import (
- "errors"
- "sync"
-)
-
-// Unbounded is an implementation of an unbounded buffer which does not use
-// extra goroutines. This is typically used for passing updates from one entity
-// to another within gRPC.
-//
-// All methods on this type are thread-safe and don't block on anything except
-// the underlying mutex used for synchronization.
-//
-// Unbounded supports values of any type to be stored in it by using a channel
-// of `any`. This means that a call to Put() incurs an extra memory allocation,
-// and also that users need a type assertion while reading. For performance
-// critical code paths, using Unbounded is strongly discouraged and defining a
-// new type specific implementation of this buffer is preferred. See
-// internal/transport/transport.go for an example of this.
-type Unbounded struct {
- c chan any
- closed bool
- closing bool
- mu sync.Mutex
- backlog []any
-}
-
-// NewUnbounded returns a new instance of Unbounded.
-func NewUnbounded() *Unbounded {
- return &Unbounded{c: make(chan any, 1)}
-}
-
-var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
-
-// Put adds t to the unbounded buffer.
-func (b *Unbounded) Put(t any) error {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.closing {
- return errBufferClosed
- }
- if len(b.backlog) == 0 {
- select {
- case b.c <- t:
- return nil
- default:
- }
- }
- b.backlog = append(b.backlog, t)
- return nil
-}
-
-// Load sends the earliest buffered data, if any, onto the read channel returned
-// by Get(). Users are expected to call this every time they successfully read a
-// value from the read channel.
-func (b *Unbounded) Load() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if len(b.backlog) > 0 {
- select {
- case b.c <- b.backlog[0]:
- b.backlog[0] = nil
- b.backlog = b.backlog[1:]
- default:
- }
-	} else if b.closing && !b.closed {
-		b.closed = true
-		close(b.c)
-	}
-}
-
-// Get returns a read channel on which values added to the buffer, via Put(),
-// are sent on.
-//
-// Upon reading a value from this channel, users are expected to call Load() to
-// send the next buffered value onto the channel if there is any.
-//
-// If the unbounded buffer is closed, the read channel returned by this method
-// is closed after all data is drained.
-func (b *Unbounded) Get() <-chan any {
- return b.c
-}
-
-// Close closes the unbounded buffer. No subsequent data may be Put(), and the
-// channel returned from Get() will be closed after all the data is read and
-// Load() is called for the final time.
-func (b *Unbounded) Close() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.closing {
- return
- }
- b.closing = true
- if len(b.backlog) == 0 {
- b.closed = true
- close(b.c)
- }
-}
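
The Put/Get/Load protocol described above pairs one Load with every successful read; a minimal consumer sketch (in-module caller, since the package is internal):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

func main() {
	b := buffer.NewUnbounded()
	for i := 1; i <= 3; i++ {
		_ = b.Put(i) // never blocks; overflow is queued in the backlog
	}
	b.Close()
	// Get()'s channel closes only after the backlog drains and Load() has
	// been called for the final value.
	for v := range b.Get() {
		fmt.Println(v.(int))
		b.Load() // release the next buffered value (or close the channel)
	}
}
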
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go
deleted file mode 100644
index 3ec662799..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/channel.go
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "fmt"
- "sync/atomic"
-
- "google.golang.org/grpc/connectivity"
-)
-
-// Channel represents a channel within channelz, which includes metrics and
-// internal channelz data, such as channelz id, child list, etc.
-type Channel struct {
- Entity
- // ID is the channelz id of this channel.
- ID int64
- // RefName is the human readable reference string of this channel.
- RefName string
-
- closeCalled bool
- nestedChans map[int64]string
- subChans map[int64]string
- Parent *Channel
- trace *ChannelTrace
- // traceRefCount is the number of trace events that reference this channel.
- // Non-zero traceRefCount means the trace of this channel cannot be deleted.
- traceRefCount int32
-
- // ChannelMetrics holds connectivity state, target and call metrics for the
- // channel within channelz.
- ChannelMetrics ChannelMetrics
-}
-
-// Implemented to make Channel implement the Identifier interface used for
-// nesting.
-func (c *Channel) channelzIdentifier() {}
-
-// String returns a string representation of the Channel, including its parent
-// entity and ID.
-func (c *Channel) String() string {
- if c.Parent == nil {
- return fmt.Sprintf("Channel #%d", c.ID)
- }
- return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
-}
-
-func (c *Channel) id() int64 {
- return c.ID
-}
-
-// SubChans returns a copy of the map of sub-channels associated with the
-// Channel.
-func (c *Channel) SubChans() map[int64]string {
- db.mu.RLock()
- defer db.mu.RUnlock()
- return copyMap(c.subChans)
-}
-
-// NestedChans returns a copy of the map of nested channels associated with the
-// Channel.
-func (c *Channel) NestedChans() map[int64]string {
- db.mu.RLock()
- defer db.mu.RUnlock()
- return copyMap(c.nestedChans)
-}
-
-// Trace returns a copy of the Channel's trace data.
-func (c *Channel) Trace() *ChannelTrace {
- db.mu.RLock()
- defer db.mu.RUnlock()
- return c.trace.copy()
-}
-
-// ChannelMetrics holds connectivity state, target and call metrics for the
-// channel within channelz.
-type ChannelMetrics struct {
- // The current connectivity state of the channel.
- State atomic.Pointer[connectivity.State]
-	// The target this channel originally tried to connect to. May be absent.
- Target atomic.Pointer[string]
- // The number of calls started on the channel.
- CallsStarted atomic.Int64
- // The number of calls that have completed with an OK status.
- CallsSucceeded atomic.Int64
-	// The number of calls that have completed with a non-OK status.
- CallsFailed atomic.Int64
- // The last time a call was started on the channel.
- LastCallStartedTimestamp atomic.Int64
-}
-
-// CopyFrom copies the metrics in o to c. For testing only.
-func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
- c.State.Store(o.State.Load())
- c.Target.Store(o.Target.Load())
- c.CallsStarted.Store(o.CallsStarted.Load())
- c.CallsSucceeded.Store(o.CallsSucceeded.Load())
- c.CallsFailed.Store(o.CallsFailed.Load())
- c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
-}
-
-// Equal returns true iff the metrics of c are the same as the metrics of o.
-// For testing only.
-func (c *ChannelMetrics) Equal(o any) bool {
- oc, ok := o.(*ChannelMetrics)
- if !ok {
- return false
- }
- if (c.State.Load() == nil) != (oc.State.Load() == nil) {
- return false
- }
- if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
- return false
- }
- if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
- return false
- }
- if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
- return false
- }
- return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
- c.CallsFailed.Load() == oc.CallsFailed.Load() &&
- c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
- c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
-}
-
-func strFromPointer(s *string) string {
- if s == nil {
- return ""
- }
- return *s
-}
-
-// String returns a string representation of the ChannelMetrics, including its
-// state, target, and call metrics.
-func (c *ChannelMetrics) String() string {
- return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
- c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
- )
-}
-
-// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
-// specified initial values for testing purposes.
-func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
- c := &ChannelMetrics{}
- c.State.Store(&state)
- c.Target.Store(&target)
- c.CallsStarted.Store(started)
- c.CallsSucceeded.Store(succeeded)
- c.CallsFailed.Store(failed)
- c.LastCallStartedTimestamp.Store(timestamp)
- return c
-}
-
-func (c *Channel) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *SubChannel:
- c.subChans[id] = v.RefName
- case *Channel:
- c.nestedChans[id] = v.RefName
- default:
- logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
- }
-}
-
-func (c *Channel) deleteChild(id int64) {
- delete(c.subChans, id)
- delete(c.nestedChans, id)
- c.deleteSelfIfReady()
-}
-
-func (c *Channel) triggerDelete() {
- c.closeCalled = true
- c.deleteSelfIfReady()
-}
-
-func (c *Channel) getParentID() int64 {
- if c.Parent == nil {
- return -1
- }
- return c.Parent.ID
-}
-
-// deleteSelfFromTree tries to delete the channel from the channelz entry
-// relation tree, which means deleting the channel reference from its parent's
-// child list.
-//
-// A channel can only be deleted from the tree once removal of the
-// corresponding gRPC object has been requested and the channel has no
-// children left.
-//
-// The returned boolean indicates whether the channel was successfully deleted
-// from the tree.
-func (c *Channel) deleteSelfFromTree() (deleted bool) {
- if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
- return false
- }
- // not top channel
- if c.Parent != nil {
- c.Parent.deleteChild(c.ID)
- }
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the channel from the
-// map, which means removing the channel from channelz's tracking entirely.
-// Users can then no longer use its id to query the channel, and its memory
-// will be garbage collected.
-//
-// The channel's trace reference count must be 0 for it to be deleted from the
-// map: the channel tracing gRFC specifies that as long as some other trace
-// references an entity, the trace of that entity must not be deleted. To
-// release the resources allocated by grpc, the reference to the grpc object
-// is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool indicating whether the channel can safely be deleted from
-// the map.
-func (c *Channel) deleteSelfFromMap() (deleted bool) {
- return c.getTraceRefCount() == 0
-}
-
-// deleteSelfIfReady tries to delete the channel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
-// parent's child list.
-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
-// will return entry not found error.
-func (c *Channel) deleteSelfIfReady() {
- if !c.deleteSelfFromTree() {
- return
- }
- if !c.deleteSelfFromMap() {
- return
- }
- db.deleteEntry(c.ID)
- c.trace.clear()
-}
-
-func (c *Channel) getChannelTrace() *ChannelTrace {
- return c.trace
-}
-
-func (c *Channel) incrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, 1)
-}
-
-func (c *Channel) decrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, -1)
-}
-
-func (c *Channel) getTraceRefCount() int {
- i := atomic.LoadInt32(&c.traceRefCount)
- return int(i)
-}
-
-func (c *Channel) getRefName() string {
- return c.RefName
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
deleted file mode 100644
index 64c791953..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "fmt"
- "sort"
- "sync"
- "time"
-)
-
-// entry represents a node in the channelz database.
-type entry interface {
- // addChild adds a child e, whose channelz id is id to child list
- addChild(id int64, e entry)
- // deleteChild deletes a child with channelz id to be id from child list
- deleteChild(id int64)
- // triggerDelete tries to delete self from channelz database. However, if
- // child list is not empty, then deletion from the database is on hold until
- // the last child is deleted from database.
- triggerDelete()
-	// deleteSelfIfReady checks whether triggerDelete() has been called before,
- // and whether child list is now empty. If both conditions are met, then
- // delete self from database.
- deleteSelfIfReady()
-	// getParentID returns the parent ID of the entry; entries with no parent
-	// report a non-positive value.
- getParentID() int64
- Entity
-}
-
-// channelMap is the storage data structure for channelz.
-//
-// Methods of channelMap can be divided into two categories with respect to
-// locking.
-//
-//  1. Methods that acquire the global lock.
-//  2. Methods that can only be called when the global lock is already held.
-//
-// Methods of the second type must always be called from within a method of
-// the first type.
-type channelMap struct {
- mu sync.RWMutex
- topLevelChannels map[int64]struct{}
- channels map[int64]*Channel
- subChannels map[int64]*SubChannel
- sockets map[int64]*Socket
- servers map[int64]*Server
-}
-
-func newChannelMap() *channelMap {
- return &channelMap{
- topLevelChannels: make(map[int64]struct{}),
- channels: make(map[int64]*Channel),
- subChannels: make(map[int64]*SubChannel),
- sockets: make(map[int64]*Socket),
- servers: make(map[int64]*Server),
- }
-}
-
-func (c *channelMap) addServer(id int64, s *Server) {
- c.mu.Lock()
- defer c.mu.Unlock()
- s.cm = c
- c.servers[id] = s
-}
-
-func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
- c.mu.Lock()
- defer c.mu.Unlock()
- cn.trace.cm = c
- c.channels[id] = cn
- if isTopChannel {
- c.topLevelChannels[id] = struct{}{}
- } else if p := c.channels[pid]; p != nil {
- p.addChild(id, cn)
- } else {
- logger.Infof("channel %d references invalid parent ID %d", id, pid)
- }
-}
-
-func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
- c.mu.Lock()
- defer c.mu.Unlock()
- sc.trace.cm = c
- c.subChannels[id] = sc
- if p := c.channels[pid]; p != nil {
- p.addChild(id, sc)
- } else {
- logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
- }
-}
-
-func (c *channelMap) addSocket(s *Socket) {
- c.mu.Lock()
- defer c.mu.Unlock()
- s.cm = c
- c.sockets[s.ID] = s
-	if s.Parent == nil {
-		logger.Infof("normal socket %d has no parent", s.ID)
-		return
-	}
-	s.Parent.(entry).addChild(s.ID, s)
-}
-
-// removeEntry triggers the removal of an entry, which may not immediately
-// delete the entry if it has to wait on the deletion of its children, or
-// until no other entity's channel trace references it. It may lead to a chain
-// of entry deletions. For example, deleting the last socket of a gracefully
-// shutting down server will lead to the server being deleted as well.
-func (c *channelMap) removeEntry(id int64) {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.findEntry(id).triggerDelete()
-}
-
-// tracedChannel represents tracing operations which are present on both
-// channels and subChannels.
-type tracedChannel interface {
- getChannelTrace() *ChannelTrace
- incrTraceRefCount()
- decrTraceRefCount()
- getRefName() string
-}
-
-// c.mu must be held by the caller.
-func (c *channelMap) decrTraceRefCount(id int64) {
- e := c.findEntry(id)
- if v, ok := e.(tracedChannel); ok {
- v.decrTraceRefCount()
- e.deleteSelfIfReady()
- }
-}
-
-// c.mu must be held by the caller.
-func (c *channelMap) findEntry(id int64) entry {
- if v, ok := c.channels[id]; ok {
- return v
- }
- if v, ok := c.subChannels[id]; ok {
- return v
- }
- if v, ok := c.servers[id]; ok {
- return v
- }
- if v, ok := c.sockets[id]; ok {
- return v
- }
- return &dummyEntry{idNotFound: id}
-}
-
-// c.mu must be held by the caller.
-//
-// deleteEntry deletes an entry from the channelMap. Before calling this
-// method, the caller must check that the entry is ready to be deleted, i.e.
-// removeEntry() has been called on it and no children remain.
-func (c *channelMap) deleteEntry(id int64) entry {
- if v, ok := c.sockets[id]; ok {
- delete(c.sockets, id)
- return v
- }
- if v, ok := c.subChannels[id]; ok {
- delete(c.subChannels, id)
- return v
- }
- if v, ok := c.channels[id]; ok {
- delete(c.channels, id)
- delete(c.topLevelChannels, id)
- return v
- }
- if v, ok := c.servers[id]; ok {
- delete(c.servers, id)
- return v
- }
- return &dummyEntry{idNotFound: id}
-}
-
-func (c *channelMap) traceEvent(id int64, desc *TraceEvent) {
- c.mu.Lock()
- defer c.mu.Unlock()
- child := c.findEntry(id)
- childTC, ok := child.(tracedChannel)
- if !ok {
- return
- }
- childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
- if desc.Parent != nil {
- parent := c.findEntry(child.getParentID())
- var chanType RefChannelType
- switch child.(type) {
- case *Channel:
- chanType = RefChannel
- case *SubChannel:
- chanType = RefSubChannel
- }
- if parentTC, ok := parent.(tracedChannel); ok {
- parentTC.getChannelTrace().append(&traceEvent{
- Desc: desc.Parent.Desc,
- Severity: desc.Parent.Severity,
- Timestamp: time.Now(),
- RefID: id,
- RefName: childTC.getRefName(),
- RefType: chanType,
- })
- childTC.incrTraceRefCount()
- }
- }
-}
-
-type int64Slice []int64
-
-func (s int64Slice) Len() int { return len(s) }
-func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-
-func copyMap(m map[int64]string) map[int64]string {
- n := make(map[int64]string)
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
- if maxResults <= 0 {
- maxResults = EntriesPerPage
- }
- c.mu.RLock()
- defer c.mu.RUnlock()
- l := int64(len(c.topLevelChannels))
- ids := make([]int64, 0, l)
-
- for k := range c.topLevelChannels {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- end := true
- var t []*Channel
- for _, v := range ids[idx:] {
- if len(t) == maxResults {
- end = false
- break
- }
- if cn, ok := c.channels[v]; ok {
- t = append(t, cn)
- }
- }
- return t, end
-}
-
-func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) {
- if maxResults <= 0 {
- maxResults = EntriesPerPage
- }
- c.mu.RLock()
- defer c.mu.RUnlock()
- ids := make([]int64, 0, len(c.servers))
- for k := range c.servers {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- end := true
- var s []*Server
- for _, v := range ids[idx:] {
- if len(s) == maxResults {
- end = false
- break
- }
- if svr, ok := c.servers[v]; ok {
- s = append(s, svr)
- }
- }
- return s, end
-}
-
-func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
- if maxResults <= 0 {
- maxResults = EntriesPerPage
- }
- c.mu.RLock()
- defer c.mu.RUnlock()
- svr, ok := c.servers[id]
- if !ok {
- // server with id doesn't exist.
- return nil, true
- }
- svrskts := svr.sockets
- ids := make([]int64, 0, len(svrskts))
- sks := make([]*Socket, 0, min(len(svrskts), maxResults))
- for k := range svrskts {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
- end := true
- for _, v := range ids[idx:] {
- if len(sks) == maxResults {
- end = false
- break
- }
- if ns, ok := c.sockets[v]; ok {
- sks = append(sks, ns)
- }
- }
- return sks, end
-}
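The three listing functions above share one pagination technique: collect the map keys, sort them, binary-search (sort.Search) for the first id at or above the cursor, and report end=false when the page fills up so the caller can resume from the last returned id plus one. A minimal, self-contained sketch of that id-cursor pattern; the names here are illustrative, not part of the channelz API:

package main

import (
	"fmt"
	"sort"
)

// paginate returns up to maxResults keys of m at or above startID, in
// ascending order, plus end=true when nothing is left beyond the page.
func paginate(m map[int64]string, startID int64, maxResults int) (page []int64, end bool) {
	ids := make([]int64, 0, len(m))
	for k := range m {
		ids = append(ids, k)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
	end = true
	for _, id := range ids[idx:] {
		if len(page) == maxResults {
			end = false // more remain; resume with startID = page[len(page)-1]+1
			break
		}
		page = append(page, id)
	}
	return page, end
}

func main() {
	m := map[int64]string{1: "a", 2: "b", 3: "c", 5: "d"}
	fmt.Println(paginate(m, 2, 2)) // [2 3] false
}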
-
-func (c *channelMap) getChannel(id int64) *Channel {
- c.mu.RLock()
- defer c.mu.RUnlock()
- return c.channels[id]
-}
-
-func (c *channelMap) getSubChannel(id int64) *SubChannel {
- c.mu.RLock()
- defer c.mu.RUnlock()
- return c.subChannels[id]
-}
-
-func (c *channelMap) getSocket(id int64) *Socket {
- c.mu.RLock()
- defer c.mu.RUnlock()
- return c.sockets[id]
-}
-
-func (c *channelMap) getServer(id int64) *Server {
- c.mu.RLock()
- defer c.mu.RUnlock()
- return c.servers[id]
-}
-
-type dummyEntry struct {
- // dummyEntry is a placeholder entry used to handle the entry-not-found case.
- idNotFound int64
- Entity
-}
-
-func (d *dummyEntry) String() string {
- return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
-}
-
-func (d *dummyEntry) ID() int64 { return d.idNotFound }
-
-func (d *dummyEntry) addChild(id int64, e entry) {
- // Note: It is possible for a normal program to reach here under a race
- // condition. For example, there could be a race between ClientConn.Close()
- // info being propagated to addrConn and http2Client. ClientConn.Close()
- // cancels the context, causing http2Client to error out. The error is then
- // caught by the transport monitor before addrConn.tearDown() is called
- // inside ClientConn.Close(), so the addrConn creates a new transport. When
- // registering the new transport in channelz, its parent addrConn could have
- // already been torn down and deleted from channelz tracking, which is how
- // execution reaches this code.
- logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
-}
-
-func (d *dummyEntry) deleteChild(id int64) {
- // It is possible for a normal program to reach here under race condition.
- // Refer to the example described in addChild().
- logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
-}
-
-func (d *dummyEntry) triggerDelete() {
- logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
-}
-
-func (*dummyEntry) deleteSelfIfReady() {
- // code should not reach here. deleteSelfIfReady is always called on an existing entry.
-}
-
-func (*dummyEntry) getParentID() int64 {
- return 0
-}
-
-// Entity is implemented by all channelz types.
-type Entity interface {
- isEntity()
- fmt.Stringer
- id() int64
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
deleted file mode 100644
index 078bb8123..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package channelz defines internal APIs for enabling the channelz service, entry
-// registration/deletion, and accessing channelz data. It also defines channelz
-// metric struct formats.
-package channelz
-
-import (
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/internal"
-)
-
-var (
- // IDGen is the global channelz entity ID generator. It should not be used
- // outside this package except by tests.
- IDGen IDGenerator
-
- db = newChannelMap()
- // EntriesPerPage defines the number of channelz entries to be shown on a web page.
- EntriesPerPage = 50
- curState int32
-)
-
-// TurnOn turns on channelz data collection.
-func TurnOn() {
- atomic.StoreInt32(&curState, 1)
-}
-
-func init() {
- internal.ChannelzTurnOffForTesting = func() {
- atomic.StoreInt32(&curState, 0)
- }
-}
-
-// IsOn returns whether channelz data collection is on.
-func IsOn() bool {
- return atomic.LoadInt32(&curState) == 1
-}
-
-// GetTopChannels returns a slice of top-level Channels, along with a boolean
-// indicating whether there are more top-level channels to be queried for.
-//
-// The arg id specifies that only channels with an id at or above it will be
-// included in the result. The returned slice is at most maxResults long (or
-// EntriesPerPage long if maxResults is zero), and is sorted in ascending id
-// order.
-func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) {
- return db.getTopChannels(id, maxResults)
-}
-
-// GetServers returns a slice of Servers, along with a boolean indicating
-// whether there are more servers to be queried for.
-//
-// The arg id specifies that only servers with an id at or above it will be
-// included in the result. The returned slice is at most maxResults long (or
-// EntriesPerPage long if maxResults is zero), and is sorted in ascending id
-// order.
-func GetServers(id int64, maxResults int) ([]*Server, bool) {
- return db.getServers(id, maxResults)
-}
-
-// GetServerSockets returns a slice of the normal Sockets of the server
-// identified by id, along with a boolean indicating whether there are more
-// sockets to be queried for.
-//
-// The arg startID specifies that only sockets with an id at or above it will
-// be included in the result. The returned slice is at most maxResults long (or
-// EntriesPerPage long if maxResults is zero), and is sorted in ascending id
-// order.
-func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
- return db.getServerSockets(id, startID, maxResults)
-}
-
-// GetChannel returns the Channel for the channel (identified by id).
-func GetChannel(id int64) *Channel {
- return db.getChannel(id)
-}
-
-// GetSubChannel returns the SubChannel for the subchannel (identified by id).
-func GetSubChannel(id int64) *SubChannel {
- return db.getSubChannel(id)
-}
-
-// GetSocket returns the Socket for the socket (identified by id).
-func GetSocket(id int64) *Socket {
- return db.getSocket(id)
-}
-
-// GetServer returns the Server for the server (identified by id).
-func GetServer(id int64) *Server {
- return db.getServer(id)
-}
-
-// RegisterChannel registers a new channel in the channelz database with target
-// as its target and reference name, and adds it to the child list of its
-// parent. parent == nil means no parent.
-//
-// Returns the created Channel, which carries a unique channelz identifier.
-//
-// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterChannel(parent *Channel, target string) *Channel {
- id := IDGen.genID()
-
- if !IsOn() {
- return &Channel{ID: id}
- }
-
- isTopChannel := parent == nil
-
- cn := &Channel{
- ID: id,
- RefName: target,
- nestedChans: make(map[int64]string),
- subChans: make(map[int64]string),
- Parent: parent,
- trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
- }
- cn.ChannelMetrics.Target.Store(&target)
- db.addChannel(id, cn, isTopChannel, cn.getParentID())
- return cn
-}
-
-// RegisterSubChannel registers a new subchannel in the channelz database with
-// ref as its reference name, and adds it to the child list of its parent.
-//
-// Returns the created SubChannel, which carries a unique channelz identifier.
-//
-// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
- id := IDGen.genID()
- sc := &SubChannel{
- ID: id,
- RefName: ref,
- parent: parent,
- }
-
- if !IsOn() {
- return sc
- }
-
- sc.sockets = make(map[int64]string)
- sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
- db.addSubChannel(id, sc, parent.ID)
- return sc
-}
-
-// RegisterServer registers a new server in the channelz database with ref as
-// its reference name. It returns the created Server, which carries a unique
-// channelz tracking id.
-//
-// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterServer(ref string) *Server {
- id := IDGen.genID()
- if !IsOn() {
- return &Server{ID: id}
- }
-
- svr := &Server{
- RefName: ref,
- sockets: make(map[int64]string),
- listenSockets: make(map[int64]string),
- ID: id,
- }
- db.addServer(id, svr)
- return svr
-}
-
-// RegisterSocket registers the given socket skt in the channelz database, and
-// adds it to the child list of its parent (identified by skt.Parent, which
-// must be set). It returns skt with a unique channelz tracking id assigned.
-//
-// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterSocket(skt *Socket) *Socket {
- skt.ID = IDGen.genID()
- if IsOn() {
- db.addSocket(skt)
- }
- return skt
-}
-
-// RemoveEntry removes the entry with the given unique channelz tracking id
-// from the channelz database.
-//
-// If channelz is not turned ON, this function is a no-op.
-func RemoveEntry(id int64) {
- if !IsOn() {
- return
- }
- db.removeEntry(id)
-}
-
-// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
-type IDGenerator struct {
- id int64
-}
-
-// Reset resets the generated ID back to zero. Should only be used at
-// initialization or by tests sensitive to the ID number.
-func (i *IDGenerator) Reset() {
- atomic.StoreInt64(&i.id, 0)
-}
-
-func (i *IDGenerator) genID() int64 {
- return atomic.AddInt64(&i.id, 1)
-}
-
-// Identifier is an opaque channelz identifier used to expose channelz symbols
-// outside of grpc. Currently only implemented by Channel since no other
-// types require exposure outside grpc.
-type Identifier interface {
- Entity
- channelzIdentifier()
-}
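IDGen above hands out entity ids with a single atomic add, so concurrent Register* calls never collide and ids stay monotonically increasing. A minimal stand-alone sketch of the same pattern (channelz is an internal package, so this mirrors it rather than importing it):

package main

import (
	"fmt"
	"sync/atomic"
)

// idGen mirrors IDGenerator: a single atomic add hands out unique,
// monotonically increasing ids with no locking, even under concurrent use.
type idGen struct{ id int64 }

func (g *idGen) genID() int64 { return atomic.AddInt64(&g.id, 1) }

func main() {
	var g idGen
	fmt.Println(g.genID(), g.genID(), g.genID()) // 1 2 3
}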
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
deleted file mode 100644
index ee4d72125..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "fmt"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var logger = grpclog.Component("channelz")
-
-// Info logs and adds a trace event if channelz is on.
-func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
- AddTraceEvent(l, e, 1, &TraceEvent{
- Desc: fmt.Sprint(args...),
- Severity: CtInfo,
- })
-}
-
-// Infof logs and adds a trace event if channelz is on.
-func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
- AddTraceEvent(l, e, 1, &TraceEvent{
- Desc: fmt.Sprintf(format, args...),
- Severity: CtInfo,
- })
-}
-
-// Warning logs and adds a trace event if channelz is on.
-func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
- AddTraceEvent(l, e, 1, &TraceEvent{
- Desc: fmt.Sprint(args...),
- Severity: CtWarning,
- })
-}
-
-// Warningf logs and adds a trace event if channelz is on.
-func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
- AddTraceEvent(l, e, 1, &TraceEvent{
- Desc: fmt.Sprintf(format, args...),
- Severity: CtWarning,
- })
-}
-
-// Error logs and adds a trace event if channelz is on.
-func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
- AddTraceEvent(l, e, 1, &TraceEvent{
- Desc: fmt.Sprint(args...),
- Severity: CtError,
- })
-}
-
-// Errorf logs and adds a trace event if channelz is on.
-func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
- AddTraceEvent(l, e, 1, &TraceEvent{
- Desc: fmt.Sprintf(format, args...),
- Severity: CtError,
- })
-}
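Each helper above formats its arguments once and fans the result out to both the process logger (via AddTraceEvent's depth-aware logging) and the entity's channel trace. A hedged, stand-alone sketch of that format-once/fan-out shape, with traceSink standing in for the trace side:

package main

import (
	"fmt"
	"log"
)

// traceSink stands in for the channel-trace half of AddTraceEvent.
func traceSink(severity, desc string) { /* e.g. append to a bounded event list */ }

// infof formats its arguments once, then fans the result out to both
// the process logger and the trace sink.
func infof(format string, args ...any) {
	d := fmt.Sprintf(format, args...)
	log.Print(d)
	traceSink("INFO", d)
}

func main() {
	infof("subchannel %d created", 42)
}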
diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
deleted file mode 100644
index b5a824992..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/server.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "fmt"
- "sync/atomic"
-)
-
-// Server is the channelz representation of a server.
-type Server struct {
- Entity
- ID int64
- RefName string
-
- ServerMetrics ServerMetrics
-
- closeCalled bool
- sockets map[int64]string
- listenSockets map[int64]string
- cm *channelMap
-}
-
-// ServerMetrics defines a struct containing metrics for servers.
-type ServerMetrics struct {
- // The number of incoming calls started on the server.
- CallsStarted atomic.Int64
- // The number of incoming calls that have completed with an OK status.
- CallsSucceeded atomic.Int64
- // The number of incoming calls that have completed with a non-OK status.
- CallsFailed atomic.Int64
- // The last time a call was started on the server.
- LastCallStartedTimestamp atomic.Int64
-}
-
-// NewServerMetricsForTesting returns an initialized ServerMetrics.
-func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
- sm := &ServerMetrics{}
- sm.CallsStarted.Store(started)
- sm.CallsSucceeded.Store(succeeded)
- sm.CallsFailed.Store(failed)
- sm.LastCallStartedTimestamp.Store(timestamp)
- return sm
-}
-
-// CopyFrom copies the metrics data from the provided ServerMetrics
-// instance into the current instance.
-func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
- sm.CallsStarted.Store(o.CallsStarted.Load())
- sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
- sm.CallsFailed.Store(o.CallsFailed.Load())
- sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
-}
-
-// ListenSockets returns the listening sockets for s.
-func (s *Server) ListenSockets() map[int64]string {
- db.mu.RLock()
- defer db.mu.RUnlock()
- return copyMap(s.listenSockets)
-}
-
-// String returns a printable description of s.
-func (s *Server) String() string {
- return fmt.Sprintf("Server #%d", s.ID)
-}
-
-func (s *Server) id() int64 {
- return s.ID
-}
-
-func (s *Server) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *Socket:
- switch v.SocketType {
- case SocketTypeNormal:
- s.sockets[id] = v.RefName
- case SocketTypeListen:
- s.listenSockets[id] = v.RefName
- }
- default:
- logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
- }
-}
-
-func (s *Server) deleteChild(id int64) {
- delete(s.sockets, id)
- delete(s.listenSockets, id)
- s.deleteSelfIfReady()
-}
-
-func (s *Server) triggerDelete() {
- s.closeCalled = true
- s.deleteSelfIfReady()
-}
-
-func (s *Server) deleteSelfIfReady() {
- if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
- return
- }
- s.cm.deleteEntry(s.ID)
-}
-
-func (s *Server) getParentID() int64 {
- return 0
-}
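ServerMetrics leans on atomic.Int64 so request goroutines can bump counters without a mutex, and CopyFrom takes a field-by-field snapshot with Load/Store. A minimal sketch of that pattern with illustrative names:

package main

import (
	"fmt"
	"sync/atomic"
)

// metrics mimics the ServerMetrics shape: atomic.Int64 counters that request
// goroutines bump lock-free and that copyFrom snapshots field by field.
type metrics struct {
	callsStarted   atomic.Int64
	callsSucceeded atomic.Int64
}

func (m *metrics) copyFrom(o *metrics) {
	m.callsStarted.Store(o.callsStarted.Load())
	m.callsSucceeded.Store(o.callsSucceeded.Load())
}

func main() {
	var live, snap metrics
	live.callsStarted.Add(3)
	live.callsSucceeded.Add(2)
	snap.copyFrom(&live)
	fmt.Println(snap.callsStarted.Load(), snap.callsSucceeded.Load()) // 3 2
}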
diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
deleted file mode 100644
index 90103847c..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/socket.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "fmt"
- "net"
- "sync/atomic"
-
- "google.golang.org/grpc/credentials"
-)
-
-// SocketMetrics defines the struct that the implementor of Socket interface
-// should return from ChannelzMetric().
-type SocketMetrics struct {
- // The number of streams that have been started.
- StreamsStarted atomic.Int64
- // The number of streams that have ended successfully:
- // On client side, receiving frame with eos bit set.
- // On server side, sending frame with eos bit set.
- StreamsSucceeded atomic.Int64
- // The number of streams that have ended unsuccessfully:
- // On client side, termination without receiving frame with eos bit set.
- // On server side, termination without sending frame with eos bit set.
- StreamsFailed atomic.Int64
- // The number of messages successfully sent on this socket.
- MessagesSent atomic.Int64
- MessagesReceived atomic.Int64
- // The number of keep alives sent. This is typically implemented with HTTP/2
- // ping messages.
- KeepAlivesSent atomic.Int64
- // The last time a stream was created by this endpoint. Usually unset for
- // servers.
- LastLocalStreamCreatedTimestamp atomic.Int64
- // The last time a stream was created by the remote endpoint. Usually unset
- // for clients.
- LastRemoteStreamCreatedTimestamp atomic.Int64
- // The last time a message was sent by this endpoint.
- LastMessageSentTimestamp atomic.Int64
- // The last time a message was received by this endpoint.
- LastMessageReceivedTimestamp atomic.Int64
-}
-
-// EphemeralSocketMetrics are metrics that change rapidly and are tracked
-// outside of channelz.
-type EphemeralSocketMetrics struct {
- // The amount of flow-control window granted to the local endpoint by the
- // remote endpoint. This may be slightly out of date due to network latency.
- // This does NOT include stream-level or TCP-level flow control info.
- LocalFlowControlWindow int64
- // The amount of flow-control window granted to the remote endpoint by the
- // local endpoint. This may be slightly out of date due to network latency.
- // This does NOT include stream-level or TCP-level flow control info.
- RemoteFlowControlWindow int64
-}
-
-// SocketType represents the type of socket.
-type SocketType string
-
-// SocketType can be one of these.
-const (
- SocketTypeNormal = "NormalSocket"
- SocketTypeListen = "ListenSocket"
-)
-
-// Socket represents a socket within channelz. It includes socket metrics and
-// data related to socket activity, and provides methods for managing and
-// interacting with sockets.
-type Socket struct {
- Entity
- SocketType SocketType
- ID int64
- Parent Entity
- cm *channelMap
- SocketMetrics SocketMetrics
- EphemeralMetrics func() *EphemeralSocketMetrics
-
- RefName string
- // The locally bound address. Immutable.
- LocalAddr net.Addr
- // The remote bound address. May be absent. Immutable.
- RemoteAddr net.Addr
- // Optional, represents the name of the remote endpoint, if different than
- // the original target name. Immutable.
- RemoteName string
- // Immutable.
- SocketOptions *SocketOptionData
- // Immutable.
- Security credentials.ChannelzSecurityValue
-}
-
-// String returns a string representation of the Socket, including its parent
-// entity, socket type, and ID.
-func (ls *Socket) String() string {
- return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
-}
-
-func (ls *Socket) id() int64 {
- return ls.ID
-}
-
-func (ls *Socket) addChild(id int64, e entry) {
- logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
-}
-
-func (ls *Socket) deleteChild(id int64) {
- logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
-}
-
-func (ls *Socket) triggerDelete() {
- ls.cm.deleteEntry(ls.ID)
- ls.Parent.(entry).deleteChild(ls.ID)
-}
-
-func (ls *Socket) deleteSelfIfReady() {
- logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
-}
-
-func (ls *Socket) getParentID() int64 {
- return ls.Parent.id()
-}
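EphemeralMetrics is deliberately a callback rather than stored state: values like flow-control windows change too quickly to mirror into channelz, so the transport hands channelz a closure that samples the live values only when a query arrives. A hedged sketch of that sample-on-demand design (the real transport would read through atomics; names here are illustrative):

package main

import "fmt"

// ephemeral and socket are stand-ins for EphemeralSocketMetrics and Socket:
// the owner supplies a closure, and the value is sampled only when asked.
type ephemeral struct{ window int64 }

type socket struct {
	sample func() *ephemeral // polled only when a channelz query arrives
}

func main() {
	w := int64(65535)
	s := socket{sample: func() *ephemeral { return &ephemeral{window: w} }}
	w = 131070                     // the live value keeps moving...
	fmt.Println(s.sample().window) // ...and the sample observes the latest: 131070
}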
diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
deleted file mode 100644
index b20802e6e..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "fmt"
- "sync/atomic"
-)
-
-// SubChannel is the channelz representation of a subchannel.
-type SubChannel struct {
- Entity
- // ID is the channelz id of this subchannel.
- ID int64
- // RefName is the human-readable reference string of this subchannel.
- RefName string
- closeCalled bool
- sockets map[int64]string
- parent *Channel
- trace *ChannelTrace
- traceRefCount int32
-
- ChannelMetrics ChannelMetrics
-}
-
-func (sc *SubChannel) String() string {
- return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID)
-}
-
-func (sc *SubChannel) id() int64 {
- return sc.ID
-}
-
-// Sockets returns a copy of the sockets map associated with the SubChannel.
-func (sc *SubChannel) Sockets() map[int64]string {
- db.mu.RLock()
- defer db.mu.RUnlock()
- return copyMap(sc.sockets)
-}
-
-// Trace returns a copy of the ChannelTrace associated with the SubChannel.
-func (sc *SubChannel) Trace() *ChannelTrace {
- db.mu.RLock()
- defer db.mu.RUnlock()
- return sc.trace.copy()
-}
-
-func (sc *SubChannel) addChild(id int64, e entry) {
- if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal {
- sc.sockets[id] = v.RefName
- } else {
- logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
- }
-}
-
-func (sc *SubChannel) deleteChild(id int64) {
- delete(sc.sockets, id)
- sc.deleteSelfIfReady()
-}
-
-func (sc *SubChannel) triggerDelete() {
- sc.closeCalled = true
- sc.deleteSelfIfReady()
-}
-
-func (sc *SubChannel) getParentID() int64 {
- return sc.parent.ID
-}
-
-// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
-// means deleting the subchannel reference from its parent's child list.
-//
-// In order for a subchannel to be deleted from the tree, removal of the corresponding grpc object
-// must have been invoked, and the subchannel must not have any children left.
-//
-// The returned boolean indicates whether the subchannel was successfully deleted from the tree.
-func (sc *SubChannel) deleteSelfFromTree() (deleted bool) {
- if !sc.closeCalled || len(sc.sockets) != 0 {
- return false
- }
- sc.parent.deleteChild(sc.ID)
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
-// deleting the subchannel from channelz's tracking entirely. Users can no longer use its id to
-// query the subchannel, and its memory will be garbage collected.
-//
-// The trace reference count of the subchannel must be 0 for it to be deleted from the map. The
-// channel tracing gRFC specifies that as long as some other trace holds a reference to an entity,
-// the trace of the referenced entity must not be deleted. In order to release the resources
-// allocated by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool indicating whether the subchannel can be safely deleted from the map.
-func (sc *SubChannel) deleteSelfFromMap() (delete bool) {
- return sc.getTraceRefCount() == 0
-}
-
-// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
-// its parent's child list.
-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
-// by id will return an entry-not-found error.
-func (sc *SubChannel) deleteSelfIfReady() {
- if !sc.deleteSelfFromTree() {
- return
- }
- if !sc.deleteSelfFromMap() {
- return
- }
- db.deleteEntry(sc.ID)
- sc.trace.clear()
-}
-
-func (sc *SubChannel) getChannelTrace() *ChannelTrace {
- return sc.trace
-}
-
-func (sc *SubChannel) incrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, 1)
-}
-
-func (sc *SubChannel) decrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, -1)
-}
-
-func (sc *SubChannel) getTraceRefCount() int {
- i := atomic.LoadInt32(&sc.traceRefCount)
- return int(i)
-}
-
-func (sc *SubChannel) getRefName() string {
- return sc.RefName
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
deleted file mode 100644
index 5ac73ff83..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-// SocketOptionData defines the struct to hold socket option data, and the
-// related getter function to obtain the info from an fd.
-type SocketOptionData struct {
- Linger *unix.Linger
- RecvTimeout *unix.Timeval
- SendTimeout *unix.Timeval
- TCPInfo *unix.TCPInfo
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
- if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
- s.Linger = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
- s.RecvTimeout = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
- s.SendTimeout = v
- }
- if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
- s.TCPInfo = v
- }
-}
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(socket any) *SocketOptionData {
- c, ok := socket.(syscall.Conn)
- if !ok {
- return nil
- }
- data := &SocketOptionData{}
- if rawConn, err := c.SyscallConn(); err == nil {
- rawConn.Control(data.Getsockopt)
- return data
- }
- return nil
-}
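GetSocketOption above hinges on syscall.RawConn.Control, which runs a callback with the raw fd while the runtime keeps the descriptor alive for the callback's duration. A small, self-contained illustration of the same mechanism on a Unix system, substituting a portable getsockopt call for the Linux-only ones:

package main

import (
	"fmt"
	"net"
	"syscall"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// *net.TCPListener implements syscall.Conn, so we can reach its raw fd.
	raw, err := ln.(syscall.Conn).SyscallConn()
	if err != nil {
		panic(err)
	}
	var typ int
	var serr error
	cerr := raw.Control(func(fd uintptr) {
		// fd is only guaranteed valid for the duration of this callback.
		typ, serr = syscall.GetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_TYPE)
	})
	if cerr != nil || serr != nil {
		panic(fmt.Sprint(cerr, " ", serr))
	}
	fmt.Println(typ == syscall.SOCK_STREAM) // true
}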
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
deleted file mode 100644
index 0e6e18e18..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
+++ /dev/null
@@ -1,47 +0,0 @@
-//go:build !linux
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "sync"
-)
-
-var once sync.Once
-
-// SocketOptionData defines the struct to hold socket option data, and the
-// related getter function to obtain the info from an fd.
-// Socket options are not supported on non-linux environments.
-type SocketOptionData struct {
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-// Socket options are not supported on non-linux environments.
-func (s *SocketOptionData) Getsockopt(uintptr) {
- once.Do(func() {
- logger.Warning("Channelz: socket options are not supported on non-linux environments")
- })
-}
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(any) *SocketOptionData {
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
deleted file mode 100644
index 2bffe4777..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- defaultMaxTraceEntry int32 = 30
-)
-
-var maxTraceEntry = defaultMaxTraceEntry
-
-// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e.
-// channel/subchannel). Setting it to 0 will disable channel tracing.
-func SetMaxTraceEntry(i int32) {
- atomic.StoreInt32(&maxTraceEntry, i)
-}
-
-// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
-// entity to default.
-func ResetMaxTraceEntryToDefault() {
- atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
-}
-
-func getMaxTraceEntry() int {
- i := atomic.LoadInt32(&maxTraceEntry)
- return int(i)
-}
-
-// traceEvent is an internal representation of a single trace event
-type traceEvent struct {
- // Desc is a simple description of the trace event.
- Desc string
- // Severity states the severity of this trace event.
- Severity Severity
- // Timestamp is the event time.
- Timestamp time.Time
- // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
- // involved in this event.
- // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
- RefID int64
- // RefName is the reference name for the entity that gets referenced in the event.
- RefName string
- // RefType indicates the referenced entity type, i.e Channel or SubChannel.
- RefType RefChannelType
-}
-
-// TraceEvent is what the caller of AddTraceEvent should provide to describe the
-// event to be added to the channel trace.
-//
-// The Parent field is optional. It is used for an event that will be recorded
-// in the entity's parent trace.
-type TraceEvent struct {
- Desc string
- Severity Severity
- Parent *TraceEvent
-}
-
-// ChannelTrace provides tracing information for a channel.
-// It tracks various events and metadata related to the channel's lifecycle
-// and operations.
-type ChannelTrace struct {
- cm *channelMap
- clearCalled bool
- // The time when the trace was created.
- CreationTime time.Time
- // A counter for the number of events recorded in the
- // trace.
- EventNum int64
- mu sync.Mutex
- // A slice of traceEvent pointers representing the events recorded for
- // this channel.
- Events []*traceEvent
-}
-
-func (c *ChannelTrace) copy() *ChannelTrace {
- return &ChannelTrace{
- CreationTime: c.CreationTime,
- EventNum: c.EventNum,
- Events: append(([]*traceEvent)(nil), c.Events...),
- }
-}
-
-func (c *ChannelTrace) append(e *traceEvent) {
- c.mu.Lock()
- if len(c.Events) == getMaxTraceEntry() {
- del := c.Events[0]
- c.Events = c.Events[1:]
- if del.RefID != 0 {
- // start recursive cleanup in a goroutine to not block the call originating from grpc.
- go func() {
- // need to acquire the c.cm.mu lock to call the unlocked decrTraceRefCount func.
- c.cm.mu.Lock()
- c.cm.decrTraceRefCount(del.RefID)
- c.cm.mu.Unlock()
- }()
- }
- }
- e.Timestamp = time.Now()
- c.Events = append(c.Events, e)
- c.EventNum++
- c.mu.Unlock()
-}
-
-func (c *ChannelTrace) clear() {
- if c.clearCalled {
- return
- }
- c.clearCalled = true
- c.mu.Lock()
- for _, e := range c.Events {
- if e.RefID != 0 {
- // caller should have already held the c.cm.mu lock.
- c.cm.decrTraceRefCount(e.RefID)
- }
- }
- c.mu.Unlock()
-}
-
-// Severity is the severity level of a trace event.
-// The canonical enumeration of all valid values is here:
-// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
-type Severity int
-
-const (
- // CtUnknown indicates unknown severity of a trace event.
- CtUnknown Severity = iota
- // CtInfo indicates info level severity of a trace event.
- CtInfo
- // CtWarning indicates warning level severity of a trace event.
- CtWarning
- // CtError indicates error level severity of a trace event.
- CtError
-)
-
-// RefChannelType is the type of the entity being referenced in a trace event.
-type RefChannelType int
-
-const (
- // RefUnknown indicates an unknown entity type, the zero value for this type.
- RefUnknown RefChannelType = iota
- // RefChannel indicates the referenced entity is a Channel.
- RefChannel
- // RefSubChannel indicates the referenced entity is a SubChannel.
- RefSubChannel
- // RefServer indicates the referenced entity is a Server.
- RefServer
- // RefListenSocket indicates the referenced entity is a ListenSocket.
- RefListenSocket
- // RefNormalSocket indicates the referenced entity is a NormalSocket.
- RefNormalSocket
-)
-
-var refChannelTypeToString = map[RefChannelType]string{
- RefUnknown: "Unknown",
- RefChannel: "Channel",
- RefSubChannel: "SubChannel",
- RefServer: "Server",
- RefListenSocket: "ListenSocket",
- RefNormalSocket: "NormalSocket",
-}
-
-// String returns a string representation of the RefChannelType
-func (r RefChannelType) String() string {
- return refChannelTypeToString[r]
-}
-
-// AddTraceEvent adds a trace event related to the entity with the specified
-// id, using the provided TraceEvent.
-//
-// If channelz is not turned ON, this will simply log the event descriptions.
-func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
- // Log only the trace description associated with the bottommost entity.
- d := fmt.Sprintf("[%s]%s", e, desc.Desc)
- switch desc.Severity {
- case CtUnknown, CtInfo:
- l.InfoDepth(depth+1, d)
- case CtWarning:
- l.WarningDepth(depth+1, d)
- case CtError:
- l.ErrorDepth(depth+1, d)
- }
-
- if getMaxTraceEntry() == 0 {
- return
- }
- if IsOn() {
- db.traceEvent(e.id(), desc)
- }
-}
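The append method above enforces getMaxTraceEntry() as a sliding window: once the list is full, the oldest event is evicted before the new one lands (and the eviction also releases any trace reference the evicted event held). A minimal sketch of just the sliding-window part, with ref counting elided:

package main

import "fmt"

const maxEntries = 3 // stands in for getMaxTraceEntry()

type trace struct{ events []string }

// append keeps at most maxEntries events, evicting the oldest first.
func (t *trace) append(e string) {
	if len(t.events) == maxEntries {
		t.events = t.events[1:] // drop the oldest event
	}
	t.events = append(t.events, e)
}

func main() {
	var t trace
	for _, e := range []string{"created", "connecting", "ready", "shutdown"} {
		t.append(e)
	}
	fmt.Println(t.events) // [connecting ready shutdown]
}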
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
deleted file mode 100644
index 9deee7f65..000000000
--- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import (
- "context"
-)
-
-// requestInfoKey is a struct to be used as the key to store RequestInfo in a
-// context.
-type requestInfoKey struct{}
-
-// NewRequestInfoContext creates a context with ri.
-func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
- return context.WithValue(ctx, requestInfoKey{}, ri)
-}
-
-// RequestInfoFromContext extracts the RequestInfo from ctx.
-func RequestInfoFromContext(ctx context.Context) any {
- return ctx.Value(requestInfoKey{})
-}
-
-// clientHandshakeInfoKey is a struct used as the key to store
-// ClientHandshakeInfo in a context.
-type clientHandshakeInfoKey struct{}
-
-// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
-func ClientHandshakeInfoFromContext(ctx context.Context) any {
- return ctx.Value(clientHandshakeInfoKey{})
-}
-
-// NewClientHandshakeInfoContext creates a context with chi.
-func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
- return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
-}
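The unexported empty-struct key used here is the standard Go pattern for collision-proof context values: no other package can construct requestInfoKey{}, so nothing else can read or clobber the entry. A self-contained sketch of the same idiom (function names lowercased since they are hypothetical, not the package's exports):

package main

import (
	"context"
	"fmt"
)

// requestInfoKey is unexported, so only this package can mint the key.
type requestInfoKey struct{}

func newRequestInfoContext(ctx context.Context, ri any) context.Context {
	return context.WithValue(ctx, requestInfoKey{}, ri)
}

func requestInfoFromContext(ctx context.Context) any {
	return ctx.Value(requestInfoKey{})
}

func main() {
	ctx := newRequestInfoContext(context.Background(), "method=/pkg.Svc/Call")
	fmt.Println(requestInfoFromContext(ctx)) // method=/pkg.Svc/Call
}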
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
deleted file mode 100644
index 25ade6230..000000000
--- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package credentials defines APIs for parsing SPIFFE ID.
-//
-// All APIs in this package are experimental.
-package credentials
-
-import (
- "crypto/tls"
- "crypto/x509"
- "net/url"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var logger = grpclog.Component("credentials")
-
-// SPIFFEIDFromState parses the SPIFFE ID from the given tls.ConnectionState.
-// If the SPIFFE ID format is invalid, it returns nil and logs a warning.
-func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
- if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
- return nil
- }
- return SPIFFEIDFromCert(state.PeerCertificates[0])
-}
-
-// SPIFFEIDFromCert parses the SPIFFE ID from the given x509.Certificate. If
-// the SPIFFE ID format is invalid, it returns nil and logs a warning.
-func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
- if cert == nil || cert.URIs == nil {
- return nil
- }
- var spiffeID *url.URL
- for _, uri := range cert.URIs {
- if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
- continue
- }
- // From this point, we assume the uri is intended for a SPIFFE ID.
- if len(uri.String()) > 2048 {
- logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
- return nil
- }
- if len(uri.Host) == 0 || len(uri.Path) == 0 {
- logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
- return nil
- }
- if len(uri.Host) > 255 {
- logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
- return nil
- }
- // A valid SPIFFE certificate can only have exactly one URI SAN field.
- if len(cert.URIs) > 1 {
- logger.Warning("invalid SPIFFE ID: multiple URI SANs")
- return nil
- }
- spiffeID = uri
- }
- return spiffeID
-}
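For reference, the URI shape that survives the checks above is scheme "spiffe", a non-empty host (the trust domain, at most 255 characters) and a non-empty path (the workload id), with no user info or opaque part and a total length of at most 2048 bytes. A quick illustration of how such an ID decomposes with the standard library:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("spiffe://example.org/ns/prod/sa/frontend")
	if err != nil {
		panic(err)
	}
	// Host is the trust domain; Path is the workload id.
	fmt.Println(u.Scheme, u.Host, u.Path) // spiffe example.org /ns/prod/sa/frontend
}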
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
deleted file mode 100644
index 2919632d6..000000000
--- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "net"
- "syscall"
-)
-
-type sysConn = syscall.Conn
-
-// syscallConn keeps a reference to rawConn to support syscall.Conn for
-// channelz. SyscallConn() (the method in interface syscall.Conn) is explicitly
-// implemented on this type.
-//
-// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
-// TCPConn, UnixConn), but is not part of the net.Conn interface. So wrapper
-// conns that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
-// doesn't embed net.Conn, so even if syscall.Conn were part of net.Conn, it
-// wouldn't help here.)
-type syscallConn struct {
- net.Conn
- // sysConn is a type alias of syscall.Conn. It's necessary because the name
- // `Conn` collides with `net.Conn`.
- sysConn
-}
-
-// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
-// implements syscall.Conn. rawConn will be used to support syscall, and newConn
-// will be used for read/write.
-//
-// This function returns newConn if rawConn doesn't implement syscall.Conn.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
- sysConn, ok := rawConn.(syscall.Conn)
- if !ok {
- return newConn
- }
- return &syscallConn{
- Conn: newConn,
- sysConn: sysConn,
- }
-}
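The wrapper works because Go promotes methods from embedded fields: Read/Write come from the embedded net.Conn (newConn) while SyscallConn comes from the embedded sysConn (rawConn), so the composite satisfies both interfaces at once. A minimal sketch of the same interface-merging trick using io types:

package main

import (
	"fmt"
	"io"
	"strings"
)

type closer interface{ Close() error }

// readCloser merges two interfaces the same way syscallConn does: Read is
// promoted from the embedded io.Reader, Close from the embedded closer.
type readCloser struct {
	io.Reader
	closer
}

type nopCloser struct{}

func (nopCloser) Close() error { return nil }

func main() {
	var rc io.ReadCloser = readCloser{strings.NewReader("hi"), nopCloser{}}
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b), rc.Close()) // hi <nil>
}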
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
deleted file mode 100644
index f792fd22c..000000000
--- a/vendor/google.golang.org/grpc/internal/credentials/util.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "crypto/tls"
-)
-
-const alpnProtoStrH2 = "h2"
-
-// AppendH2ToNextProtos appends "h2" to the list of ALPN next protocols if it
-// is not already present.
-func AppendH2ToNextProtos(ps []string) []string {
- for _, p := range ps {
- if p == alpnProtoStrH2 {
- return ps
- }
- }
- ret := make([]string, 0, len(ps)+1)
- ret = append(ret, ps...)
- return append(ret, alpnProtoStrH2)
-}
-
-// CloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func CloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
-
- return cfg.Clone()
-}
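Two properties of AppendH2ToNextProtos are worth making explicit: it is idempotent (an existing "h2" short-circuits), and it copies before appending so the caller's backing array is never mutated. A stand-alone sketch of the same function:

package main

import "fmt"

// appendH2 reproduces AppendH2ToNextProtos: return ps unchanged if "h2" is
// already present, otherwise copy first so the caller's slice is untouched.
func appendH2(ps []string) []string {
	for _, p := range ps {
		if p == "h2" {
			return ps
		}
	}
	ret := make([]string, 0, len(ps)+1)
	ret = append(ret, ps...)
	return append(ret, "h2")
}

func main() {
	fmt.Println(appendH2([]string{"http/1.1"})) // [http/1.1 h2]
	fmt.Println(appendH2([]string{"h2"}))       // [h2]
}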
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
deleted file mode 100644
index 1e42b6fdc..000000000
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package envconfig contains grpc settings configured by environment variables.
-package envconfig
-
-import (
- "os"
- "strconv"
- "strings"
-)
-
-var (
- // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
- TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
- // RingHashCap indicates the maximum ring size which defaults to 4096
- // entries but may be overridden by setting the environment variable
- // "GRPC_RING_HASH_CAP". This does not override the default bounds
- // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
- RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
- // LeastRequestLB is set if we should support the least_request_experimental
- // LB policy, which can be enabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
- LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false)
- // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
- // handshakes that can be performed.
- ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
- // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
- // should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
- // option is present for backward compatibility. This option may be overridden
- // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
- // or "false".
- EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
- // XDSFallbackSupport is the env variable that controls whether support for
- // xDS fallback is turned on. If this is unset or is false, only the first
- // xDS server in the list of server configs will be used.
- XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
- // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
- // instead of the existing pickfirst implementation. This can be enabled by
- // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
- // to "true".
- NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
-)
-
-func boolFromEnv(envVar string, def bool) bool {
- if def {
- // The default is true; return true unless the variable is "false".
- return !strings.EqualFold(os.Getenv(envVar), "false")
- }
- // The default is false; return false unless the variable is "true".
- return strings.EqualFold(os.Getenv(envVar), "true")
-}
-
-func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
- v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
- if err != nil {
- return def
- }
- if v < min {
- return min
- }
- if v > max {
- return max
- }
- return v
-}
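The two defaulting modes of boolFromEnv are easy to misread: with def=true, anything other than a case-insensitive "false" (including unset) leaves the flag on; with def=false, only an explicit "true" turns it on. A runnable sketch of the same helper (DEMO_FLAG and UNSET_FLAG are made-up variable names for illustration):

package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnv reproduces the helper above: the default decides which explicit
// value ("false" or "true", case-insensitive) can flip it.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
	os.Setenv("DEMO_FLAG", "FALSE")
	fmt.Println(boolFromEnv("DEMO_FLAG", true))   // false: explicit opt-out
	fmt.Println(boolFromEnv("UNSET_FLAG", true))  // true: default wins
	fmt.Println(boolFromEnv("UNSET_FLAG", false)) // false: no explicit opt-in
}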
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
deleted file mode 100644
index dd314cfb1..000000000
--- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *
- * Copyright 2022 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package envconfig
-
-import "os"
-
-const (
- envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG"
- envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
-)
-
-var (
- // ObservabilityConfig is the json configuration for the gcp/observability
- // package specified directly in the envObservabilityConfig env var.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- ObservabilityConfig = os.Getenv(envObservabilityConfig)
- // ObservabilityConfigFile is the json configuration for the
- // gcp/observability specified in a file with the location specified in
- // envObservabilityConfigFile env var.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
-)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
deleted file mode 100644
index 2eb97f832..000000000
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package envconfig
-
-import (
- "os"
-)
-
-const (
- // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
- // Do not use this and read from env directly. Its value is read and kept in
- // variable XDSBootstrapFileName.
- //
- // When both bootstrap FileName and FileContent are set, FileName is used.
- XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
- // XDSBootstrapFileContentEnv is the env variable to set bootstrap file
- // content. Do not use this and read from env directly. Its value is read
- // and kept in variable XDSBootstrapFileContent.
- //
- // When both bootstrap FileName and FileContent are set, FileName is used.
- XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
-)
-
-var (
- // XDSBootstrapFileName holds the name of the file which contains xDS
- // bootstrap configuration. Users can specify the location of the bootstrap
- // file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
- //
- // When both bootstrap FileName and FileContent are set, FileName is used.
- XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
- // XDSBootstrapFileContent holds the content of the xDS bootstrap
- // configuration. Users can specify the bootstrap config by setting the
- // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
- //
- // When both bootstrap FileName and FileContent are set, FileName is used.
- XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
-
- // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
- C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
-
- // XDSDualstackEndpointsEnabled is true if gRPC should read the
- // "additional addresses" in the xDS endpoint resource.
- XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true)
-
- // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use
- // the system's default root certificates for TLS certificate validation.
- // For more details, see:
- // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md.
- XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false)
-)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
deleted file mode 100644
index 7617be215..000000000
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-var (
- // WithBufferPool is implemented by the grpc package and returns a dial
- // option to configure a shared buffer pool for a grpc.ClientConn.
- WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
-
- // BufferPool is implemented by the grpc package and returns a server
- // option to configure a shared buffer pool for a grpc.Server.
- BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
-)
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
deleted file mode 100644
index 092ad187a..000000000
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpclog provides logging functionality for internal gRPC packages,
-// outside of the functionality provided by the external `grpclog` package.
-package grpclog
-
-import (
- "fmt"
-
- "google.golang.org/grpc/grpclog"
-)
-
-// PrefixLogger does logging with a prefix.
-//
-// Calling a logging method on a nil *PrefixLogger logs without any prefix.
-type PrefixLogger struct {
- logger grpclog.DepthLoggerV2
- prefix string
-}
-
-// Infof does info logging.
-func (pl *PrefixLogger) Infof(format string, args ...any) {
- if pl != nil {
- // A nil receiver falls through to the global logger below, which
- // lets tests pass in a nil logger.
- format = pl.prefix + format
- pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
- return
- }
- grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
-}
-
-// Warningf does warning logging.
-func (pl *PrefixLogger) Warningf(format string, args ...any) {
- if pl != nil {
- format = pl.prefix + format
- pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
- return
- }
- grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
-}
-
-// Errorf does error logging.
-func (pl *PrefixLogger) Errorf(format string, args ...any) {
- if pl != nil {
- format = pl.prefix + format
- pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
- return
- }
- grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func (pl *PrefixLogger) V(l int) bool {
- if pl != nil {
- return pl.logger.V(l)
- }
- return true
-}
-
-// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
- return &PrefixLogger{logger: logger, prefix: prefix}
-}
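A minimal usage sketch of `PrefixLogger`, mirroring how gRPC components typically construct one from inside the grpc module (these internal packages are not importable from outside it; the `example-lb` component name and surrounding type are made up):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

type exampleBalancer struct {
	logger *internalgrpclog.PrefixLogger
}

func newExampleBalancer() *exampleBalancer {
	b := &exampleBalancer{}
	// Every line logged through b.logger carries the component name and the
	// instance pointer, which makes interleaved logs attributable.
	b.logger = internalgrpclog.NewPrefixLogger(grpclog.Component("example-lb"), fmt.Sprintf("[example-lb %p] ", b))
	b.logger.Infof("created with %d subchannels", 0)
	return b
}

func main() { newExampleBalancer() }
```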
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
deleted file mode 100644
index 8e8e86128..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- *
- * Copyright 2022 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcsync
-
-import (
- "context"
-
- "google.golang.org/grpc/internal/buffer"
-)
-
-// CallbackSerializer provides a mechanism to schedule callbacks in a
-// synchronized manner. It provides a FIFO guarantee on the order of execution
-// of scheduled callbacks. New callbacks can be scheduled by invoking the
-// Schedule() method.
-//
-// This type is safe for concurrent access.
-type CallbackSerializer struct {
- // done is closed once the serializer is shut down completely, i.e. all
- // scheduled callbacks are executed and the serializer has deallocated all
- // its resources.
- done chan struct{}
-
- callbacks *buffer.Unbounded
-}
-
-// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
-// context will be passed to the scheduled callbacks. Users should cancel the
-// provided context to shutdown the CallbackSerializer. It is guaranteed that no
-// callbacks will be added once this context is canceled, and any pending un-run
-// callbacks will be executed before the serializer is shut down.
-func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
- cs := &CallbackSerializer{
- done: make(chan struct{}),
- callbacks: buffer.NewUnbounded(),
- }
- go cs.run(ctx)
- return cs
-}
-
-// TrySchedule tries to schedule the provided callback function f to be
-// executed in the order it was added. This is a best-effort operation. If the
-// context passed to NewCallbackSerializer was canceled before this method is
-// called, the callback will not be scheduled.
-//
-// Callbacks are expected to honor the context when performing any blocking
-// operations, and should return early when the context is canceled.
-func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
- cs.callbacks.Put(f)
-}
-
-// ScheduleOr schedules the provided callback function f to be executed in the
-// order it was added. If the context passed to NewCallbackSerializer has been
-// canceled before this method is called, the onFailure callback will be
-// executed inline instead.
-//
-// Callbacks are expected to honor the context when performing any blocking
-// operations, and should return early when the context is canceled.
-func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
- if cs.callbacks.Put(f) != nil {
- onFailure()
- }
-}
-
-func (cs *CallbackSerializer) run(ctx context.Context) {
- defer close(cs.done)
-
- // TODO: when Go 1.21 is the oldest supported version, this loop and Close
- // can be replaced with:
- //
- // context.AfterFunc(ctx, cs.callbacks.Close)
- for ctx.Err() == nil {
- select {
- case <-ctx.Done():
- // Do nothing here. Next iteration of the for loop will not happen,
- // since ctx.Err() would be non-nil.
- case cb := <-cs.callbacks.Get():
- cs.callbacks.Load()
- cb.(func(context.Context))(ctx)
- }
- }
-
- // Close the buffer to prevent new callbacks from being added.
- cs.callbacks.Close()
-
- // Run all pending callbacks.
- for cb := range cs.callbacks.Get() {
- cs.callbacks.Load()
- cb.(func(context.Context))(ctx)
- }
-}
-
-// Done returns a channel that is closed after the context passed to
-// NewCallbackSerializer is canceled and all callbacks have been executed.
-func (cs *CallbackSerializer) Done() <-chan struct{} {
- return cs.done
-}
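A runnable sketch of the `CallbackSerializer` lifecycle described above: schedule a few callbacks, cancel the context to begin shutdown, and wait on `Done()` for the pending callbacks to drain.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in FIFO order, on a single goroutine.
	for i := 0; i < 3; i++ {
		i := i
		cs.TrySchedule(func(ctx context.Context) {
			fmt.Println("callback", i)
		})
	}

	// ScheduleOr runs onFailure inline if the serializer is already closed.
	cs.ScheduleOr(
		func(ctx context.Context) { fmt.Println("scheduled") },
		func() { fmt.Println("serializer closed; ran inline") },
	)

	// Canceling the context shuts the serializer down; callbacks already
	// queued still run before Done is closed.
	cancel()
	<-cs.Done()
}
```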
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
deleted file mode 100644
index fbe697c37..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcsync implements additional synchronization primitives built upon
-// the sync package.
-package grpcsync
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// Event represents a one-time event that may occur in the future.
-type Event struct {
- fired int32
- c chan struct{}
- o sync.Once
-}
-
-// Fire causes e to complete. It is safe to call multiple times, and
-// concurrently. It returns true iff this call to Fire caused the signaling
-// channel returned by Done to close.
-func (e *Event) Fire() bool {
- ret := false
- e.o.Do(func() {
- atomic.StoreInt32(&e.fired, 1)
- close(e.c)
- ret = true
- })
- return ret
-}
-
-// Done returns a channel that will be closed when Fire is called.
-func (e *Event) Done() <-chan struct{} {
- return e.c
-}
-
-// HasFired returns true if Fire has been called.
-func (e *Event) HasFired() bool {
- return atomic.LoadInt32(&e.fired) == 1
-}
-
-// NewEvent returns a new, ready-to-use Event.
-func NewEvent() *Event {
- return &Event{c: make(chan struct{})}
-}
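A short sketch of the one-shot semantics: only the first `Fire` returns true, and `Done` can be received from like any channel.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	e := grpcsync.NewEvent()

	fmt.Println(e.HasFired()) // false
	fmt.Println(e.Fire())     // true: this call closes Done
	fmt.Println(e.Fire())     // false: already fired

	<-e.Done()                // already closed; does not block
	fmt.Println(e.HasFired()) // true
}
```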
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
deleted file mode 100644
index 6d8c2f518..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcsync
-
-import (
- "context"
- "sync"
-)
-
-// Subscriber represents an entity that is subscribed to messages published on
-// a PubSub. It wraps the callback to be invoked by the PubSub when a new
-// message is published.
-type Subscriber interface {
- // OnMessage is invoked when a new message is published. Implementations
- // must not block in this method.
- OnMessage(msg any)
-}
-
-// PubSub is a simple one-to-many publish-subscribe system that supports
-// messages of arbitrary type. It guarantees that messages are delivered in
-// the same order in which they were published.
-//
-// Publisher invokes the Publish() method to publish new messages, while
-// subscribers interested in receiving these messages register a callback
-// via the Subscribe() method.
-//
-// Once a PubSub is stopped, no more messages can be published, but any pending
-// published messages will be delivered to the subscribers. Done may be used
-// to determine when all published messages have been delivered.
-type PubSub struct {
- cs *CallbackSerializer
-
- // Access to the below fields are guarded by this mutex.
- mu sync.Mutex
- msg any
- subscribers map[Subscriber]bool
-}
-
-// NewPubSub returns a new PubSub instance. Users should cancel the
-// provided context to shutdown the PubSub.
-func NewPubSub(ctx context.Context) *PubSub {
- return &PubSub{
- cs: NewCallbackSerializer(ctx),
- subscribers: map[Subscriber]bool{},
- }
-}
-
-// Subscribe registers the provided Subscriber to the PubSub.
-//
-// If the PubSub contains a previously published message, the Subscriber's
-// OnMessage() callback will be invoked asynchronously with the existing
-// message to begin with, and subsequently for every newly published message.
-//
-// The caller is responsible for invoking the returned cancel function to
-// unsubscribe itself from the PubSub.
-func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
- ps.mu.Lock()
- defer ps.mu.Unlock()
-
- ps.subscribers[sub] = true
-
- if ps.msg != nil {
- msg := ps.msg
- ps.cs.TrySchedule(func(context.Context) {
- ps.mu.Lock()
- defer ps.mu.Unlock()
- if !ps.subscribers[sub] {
- return
- }
- sub.OnMessage(msg)
- })
- }
-
- return func() {
- ps.mu.Lock()
- defer ps.mu.Unlock()
- delete(ps.subscribers, sub)
- }
-}
-
-// Publish publishes the provided message to the PubSub, and invokes
-// callbacks registered by subscribers asynchronously.
-func (ps *PubSub) Publish(msg any) {
- ps.mu.Lock()
- defer ps.mu.Unlock()
-
- ps.msg = msg
- for sub := range ps.subscribers {
- s := sub
- ps.cs.TrySchedule(func(context.Context) {
- ps.mu.Lock()
- defer ps.mu.Unlock()
- if !ps.subscribers[s] {
- return
- }
- s.OnMessage(msg)
- })
- }
-}
-
-// Done returns a channel that is closed after the context passed to NewPubSub
-// is canceled and all updates have been sent to subscribers.
-func (ps *PubSub) Done() <-chan struct{} {
- return ps.cs.Done()
-}
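A sketch of the replay-then-follow behavior described above; `printSubscriber` is a made-up Subscriber used only for illustration:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

// printSubscriber is a hypothetical Subscriber that just prints messages.
type printSubscriber struct{}

func (printSubscriber) OnMessage(msg any) { fmt.Println("got:", msg) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	ps.Publish("state-1") // retained as the most recent message

	// A late subscriber is asynchronously replayed the retained "state-1",
	// then receives every later message in publication order.
	unsub := ps.Subscribe(printSubscriber{})
	ps.Publish("state-2")

	cancel()
	<-ps.Done() // both deliveries have completed
	unsub()
}
```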
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
deleted file mode 100644
index e8d866984..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *
- * Copyright 2022 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcutil
-
-import (
- "strings"
-)
-
-// RegisteredCompressorNames holds names of the registered compressors.
-var RegisteredCompressorNames []string
-
-// IsCompressorNameRegistered returns true when name is available in registry.
-func IsCompressorNameRegistered(name string) bool {
- for _, compressor := range RegisteredCompressorNames {
- if compressor == name {
- return true
- }
- }
- return false
-}
-
-// RegisteredCompressors returns a string of registered compressor names
-// separated by comma.
-func RegisteredCompressors() string {
- return strings.Join(RegisteredCompressorNames, ",")
-}
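In normal operation the names slice is populated when compressors are registered (via the `encoding` package); the direct assignment below exists only to keep this sketch self-contained:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	// Normally appended to during compressor registration; set directly
	// here only so the example runs on its own.
	grpcutil.RegisteredCompressorNames = []string{"gzip", "snappy"}

	fmt.Println(grpcutil.IsCompressorNameRegistered("gzip")) // true
	fmt.Println(grpcutil.IsCompressorNameRegistered("lz4"))  // false
	fmt.Println(grpcutil.RegisteredCompressors())            // gzip,snappy
}
```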
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
deleted file mode 100644
index b25b0baec..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcutil
-
-import (
- "strconv"
- "time"
-)
-
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and rounds the result up. Note that this is
-// equivalent to (d+r-1)/r but is less likely to overflow.
-func div(d, r time.Duration) int64 {
- if d%r > 0 {
- return int64(d/r + 1)
- }
- return int64(d / r)
-}
-
-// EncodeDuration encodes the duration to the format grpc-timeout header
-// accepts.
-//
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
-func EncodeDuration(t time.Duration) string {
- // TODO: This is simplistic and not bandwidth efficient. Improve it.
- if t <= 0 {
- return "0n"
- }
- if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "n"
- }
- if d := div(t, time.Microsecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "u"
- }
- if d := div(t, time.Millisecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "m"
- }
- if d := div(t, time.Second); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "S"
- }
- if d := div(t, time.Minute); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "M"
- }
- // Note that maxTimeoutValue * time.Hour > MaxInt64.
- return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
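A few worked values, following the code above: the encoder picks the finest unit whose rounded-up count still fits in eight digits (maxTimeoutValue is 10^8-1, matching the grpc-timeout header's 8-digit limit).

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	fmt.Println(grpcutil.EncodeDuration(-time.Second))            // 0n (non-positive clamps to zero)
	fmt.Println(grpcutil.EncodeDuration(50 * time.Millisecond))   // 50000000n (fits in nanoseconds)
	fmt.Println(grpcutil.EncodeDuration(1500 * time.Millisecond)) // 1500000u (too big for ns, fits in us)
	fmt.Println(grpcutil.EncodeDuration(100 * time.Second))       // 100000m (too big for us, fits in ms)
}
```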
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
deleted file mode 100644
index e2f948e8f..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcutil provides utility functions used across the gRPC codebase.
-package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
deleted file mode 100644
index 6f22bd891..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcutil
-
-import (
- "context"
-
- "google.golang.org/grpc/metadata"
-)
-
-type mdExtraKey struct{}
-
-// WithExtraMetadata creates a new context with incoming md attached.
-func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
- return context.WithValue(ctx, mdExtraKey{}, md)
-}
-
-// ExtraMetadata returns the incoming metadata in ctx if it exists. The
-// returned MD should not be modified. Writing to it may cause races.
-// Modifications should be made to copies of the returned MD.
-func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
- md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
- return
-}
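A small sketch of attaching and retrieving the extra metadata (the key and value below are made up for illustration):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("x-extra-key", "extra-value")
	ctx := grpcutil.WithExtraMetadata(context.Background(), md)

	if got, ok := grpcutil.ExtraMetadata(ctx); ok {
		// Read-only access; modifications belong on a copy of got.
		fmt.Println(got.Get("x-extra-key")) // [extra-value]
	}
}
```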
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
deleted file mode 100644
index 683d1955c..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcutil
-
-import (
- "errors"
- "strings"
-)
-
-// ParseMethod splits service and method from the input. It expects format
-// "/service/method".
-func ParseMethod(methodName string) (service, method string, _ error) {
- if !strings.HasPrefix(methodName, "/") {
- return "", "", errors.New("invalid method name: should start with /")
- }
- methodName = methodName[1:]
-
- pos := strings.LastIndex(methodName, "/")
- if pos < 0 {
- return "", "", errors.New("invalid method name: suffix /method is missing")
- }
- return methodName[:pos], methodName[pos+1:], nil
-}
-
-// baseContentType is the base content-type for gRPC. This is a valid
-// content-type on its own, but can also include a content-subtype such as
-// "proto" as a suffix after "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
-// for more details.
-const baseContentType = "application/grpc"
-
-// ContentSubtype returns the content-subtype for the given content-type. The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func ContentSubtype(contentType string) (string, bool) {
- if contentType == baseContentType {
- return "", true
- }
- if !strings.HasPrefix(contentType, baseContentType) {
- return "", false
- }
- // guaranteed since != baseContentType and has baseContentType prefix
- switch contentType[len(baseContentType)] {
- case '+', ';':
- // this will return true for "application/grpc+" or "application/grpc;"
- // which the previous validContentType function tested to be valid, so we
- // just say that no content-subtype is specified in this case
- return contentType[len(baseContentType)+1:], true
- default:
- return "", false
- }
-}
-
-// ContentType builds full content type with the given sub-type.
-//
-// contentSubtype is assumed to be lowercase
-func ContentType(contentSubtype string) string {
- if contentSubtype == "" {
- return baseContentType
- }
- return baseContentType + "+" + contentSubtype
-}
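Worked examples for the parsing helpers above (the service and method names are illustrative):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	svc, m, err := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(svc, m, err) // helloworld.Greeter SayHello <nil>

	fmt.Println(grpcutil.ContentSubtype("application/grpc"))       // "" true (valid, no subtype)
	fmt.Println(grpcutil.ContentSubtype("application/grpc+proto")) // proto true
	fmt.Println(grpcutil.ContentSubtype("application/json"))       // "" false
	fmt.Println(grpcutil.ContentType("proto"))                     // application/grpc+proto
}
```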
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
deleted file mode 100644
index 7a092b2b8..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- *
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpcutil
-
-import "regexp"
-
-// FullMatchWithRegex returns whether the full text matches the regex provided.
-func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
- if len(text) == 0 {
- return re.MatchString(text)
- }
- re.Longest()
- rem := re.FindString(text)
- return len(rem) == len(text)
-}
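This helper exists because `Regexp.FindString` alone reports only the leftmost match, not whether the whole text matched; switching the engine to leftmost-longest and comparing lengths gives full-match semantics:

```go
package main

import (
	"fmt"
	"regexp"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	re := regexp.MustCompile("a+")
	fmt.Println(grpcutil.FullMatchWithRegex(re, "aaa")) // true: longest match spans the text
	fmt.Println(grpcutil.FullMatchWithRegex(re, "aab")) // false: match stops before "b"

	// The empty-text special case falls back to MatchString.
	fmt.Println(grpcutil.FullMatchWithRegex(regexp.MustCompile("b*"), "")) // true
}
```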
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
deleted file mode 100644
index 2c13ee9da..000000000
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package idle contains a component for managing idleness (entering and exiting)
-// based on RPC activity.
-package idle
-
-import (
- "fmt"
- "math"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// For overriding in unit tests.
-var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
- return time.AfterFunc(d, f)
-}
-
-// Enforcer is the functionality provided by grpc.ClientConn to enter
-// and exit from idle mode.
-type Enforcer interface {
- ExitIdleMode() error
- EnterIdleMode()
-}
-
-// Manager implements idleness detection and calls the configured Enforcer to
-// enter/exit idle mode when appropriate. Must be created by NewManager.
-type Manager struct {
- // State accessed atomically.
- lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
- activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
- activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
- closed int32 // Boolean; True when the manager is closed.
-
- // Can be accessed without atomics or mutex since these are set at creation
- // time and read-only after that.
- enforcer Enforcer // Functionality provided by grpc.ClientConn.
- timeout time.Duration
-
- // idleMu is used to guarantee mutual exclusion in two scenarios:
- // - Opposing intentions:
- // - a: Idle timeout has fired and handleIdleTimeout() is trying to put
- // the channel in idle mode because the channel has been inactive.
- // - b: At the same time an RPC is made on the channel, and OnCallBegin()
- // is trying to prevent the channel from going idle.
- // - Competing intentions:
- // - The channel is in idle mode and there are multiple RPCs starting at
- // the same time, all trying to move the channel out of idle. Only one
- // of them should succeed in doing so, while the other RPCs should
- // piggyback on the first one and be successfully handled.
- idleMu sync.RWMutex
- actuallyIdle bool
- timer *time.Timer
-}
-
-// NewManager creates a new idleness manager implementation for the
-// given idle timeout. It begins in idle mode.
-func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
- return &Manager{
- enforcer: enforcer,
- timeout: timeout,
- actuallyIdle: true,
- activeCallsCount: -math.MaxInt32,
- }
-}
-
-// resetIdleTimerLocked resets the idle timer to the given duration. Called
-// when exiting idle mode or when the timer fires and we need to reset it.
-func (m *Manager) resetIdleTimerLocked(d time.Duration) {
- if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
- return
- }
-
- // It is safe to ignore the return value from Stop() because this method is
- // only ever called from the timer callback or when exiting idle mode.
- if m.timer != nil {
- m.timer.Stop()
- }
- m.timer = timeAfterFunc(d, m.handleIdleTimeout)
-}
-
-func (m *Manager) resetIdleTimer(d time.Duration) {
- m.idleMu.Lock()
- defer m.idleMu.Unlock()
- m.resetIdleTimerLocked(d)
-}
-
-// handleIdleTimeout is the timer callback that is invoked upon expiry of the
-// configured idle timeout. The channel is considered inactive if there are no
-// ongoing calls and no RPC activity since the last time the timer fired.
-func (m *Manager) handleIdleTimeout() {
- if m.isClosed() {
- return
- }
-
- if atomic.LoadInt32(&m.activeCallsCount) > 0 {
- m.resetIdleTimer(m.timeout)
- return
- }
-
- // There has been activity on the channel since we last got here. Reset the
- // timer and return.
- if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
- // Set the timer to fire after a duration of idle timeout, calculated
- // from the time the most recent RPC completed.
- atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
- m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
- return
- }
-
- // Now that we've checked that there has been no activity, attempt to enter
- // idle mode, which is very likely to succeed.
- if m.tryEnterIdleMode() {
- // Successfully entered idle mode. No timer needed until we exit idle.
- return
- }
-
- // Failed to enter idle mode due to a concurrent RPC that kept the channel
- // active, or because of an error from the channel. Undo the attempt to
- // enter idle, and reset the timer to try again later.
- m.resetIdleTimer(m.timeout)
-}
-
-// tryEnterIdleMode instructs the channel to enter idle mode. But before
-// that, it performs a last minute check to ensure that no new RPC has come in,
-// making the channel active.
-//
-// Return value indicates whether or not the channel moved to idle mode.
-//
-// Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (m *Manager) tryEnterIdleMode() bool {
- // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
- // that the channel is either in idle mode or is trying to get there.
- if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
- // This CAS operation can fail if an RPC started after we checked for
- // activity in the timer handler, or one was ongoing from before the
- // last time the timer fired, or if a test is attempting to enter idle
- // mode without checking. In all cases, abort going into idle mode.
- return false
- }
- // N.B. if we fail to enter idle mode after this, we must re-add
- // math.MaxInt32 to m.activeCallsCount.
-
- m.idleMu.Lock()
- defer m.idleMu.Unlock()
-
- if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
- // We raced and lost to a new RPC. Very rare, but stop entering idle.
- atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
- return false
- }
- if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
- // A very short RPC could have come in (and also finished) after we
- // checked for calls count and activity in handleIdleTimeout(), but
- // before the CAS operation. So, we need to check for activity again.
- atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
- return false
- }
-
- // No new RPCs have come in since we set the active calls count value to
- // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
- // unconditionally now.
- m.enforcer.EnterIdleMode()
- m.actuallyIdle = true
- return true
-}
-
-// EnterIdleModeForTesting instructs the channel to enter idle mode.
-func (m *Manager) EnterIdleModeForTesting() {
- m.tryEnterIdleMode()
-}
-
-// OnCallBegin is invoked at the start of every RPC.
-func (m *Manager) OnCallBegin() error {
- if m.isClosed() {
- return nil
- }
-
- if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
- // Channel is not idle now. Set the activity bit and allow the call.
- atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
- return nil
- }
-
- // Channel is either in idle mode or is in the process of moving to idle
- // mode. Attempt to exit idle mode to allow this RPC.
- if err := m.ExitIdleMode(); err != nil {
- // Undo the increment to calls count, and return an error causing the
- // RPC to fail.
- atomic.AddInt32(&m.activeCallsCount, -1)
- return err
- }
-
- atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
- return nil
-}
-
-// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
-// internal state.
-func (m *Manager) ExitIdleMode() error {
- // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
- m.idleMu.Lock()
- defer m.idleMu.Unlock()
-
- if m.isClosed() || !m.actuallyIdle {
- // This can happen in three scenarios:
- // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
- // tryEnterIdleMode(). But before the latter could grab the lock, an RPC
- // came in and OnCallBegin() noticed that the calls count is negative.
- // - Channel is in idle mode, and multiple new RPCs come in at the same
- // time, all of them notice a negative calls count in OnCallBegin and get
- // here. The first one to get the lock would get the channel to exit idle.
- // - Channel is not in idle mode, and the user calls Connect which calls
- // m.ExitIdleMode.
- //
- // In any case, there is nothing to do here.
- return nil
- }
-
- if err := m.enforcer.ExitIdleMode(); err != nil {
- return fmt.Errorf("failed to exit idle mode: %w", err)
- }
-
- // Undo the idle entry process. This also respects any new RPC attempts.
- atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
- m.actuallyIdle = false
-
- // Start a new timer to fire after the configured idle timeout.
- m.resetIdleTimerLocked(m.timeout)
- return nil
-}
-
-// OnCallEnd is invoked at the end of every RPC.
-func (m *Manager) OnCallEnd() {
- if m.isClosed() {
- return
- }
-
- // Record the time at which the most recent call finished.
- atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
-
- // Decrement the active calls count. This count can temporarily go negative
- // when the timer callback is in the process of moving the channel to idle
- // mode, but one or more RPCs come in and complete before the timer callback
- // can get done with the process of moving to idle mode.
- atomic.AddInt32(&m.activeCallsCount, -1)
-}
-
-func (m *Manager) isClosed() bool {
- return atomic.LoadInt32(&m.closed) == 1
-}
-
-// Close stops the timer associated with the Manager, if it exists.
-func (m *Manager) Close() {
- atomic.StoreInt32(&m.closed, 1)
-
- m.idleMu.Lock()
- if m.timer != nil {
- m.timer.Stop()
- m.timer = nil
- }
- m.idleMu.Unlock()
-}
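A minimal sketch wiring the Manager to a toy Enforcer; `logEnforcer` is made up for illustration (in gRPC the Enforcer is the ClientConn). Since the Manager begins in idle mode, the first `OnCallBegin` exits the initial idle state:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc/internal/idle"
)

// logEnforcer is a stand-in for grpc.ClientConn's idle-mode hooks.
type logEnforcer struct{}

func (logEnforcer) ExitIdleMode() error { fmt.Println("exiting idle"); return nil }
func (logEnforcer) EnterIdleMode()      { fmt.Println("entering idle") }

func main() {
	m := idle.NewManager(logEnforcer{}, 30*time.Minute)

	// Each RPC brackets its lifetime with OnCallBegin/OnCallEnd. The first
	// call moves the (initially idle) channel out of idle mode and starts
	// the idle timer.
	if err := m.OnCallBegin(); err != nil {
		log.Fatal(err)
	}
	m.OnCallEnd()

	m.Close()
}
```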
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
deleted file mode 100644
index 13e1f386b..000000000
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains gRPC-internal code, to avoid polluting
-// the godoc of the top-level grpc package. It must not import any grpc
-// symbols to avoid circular dependencies.
-package internal
-
-import (
- "context"
- "time"
-
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/serviceconfig"
-)
-
-var (
- // HealthCheckFunc is used to provide client-side LB channel health checking
- HealthCheckFunc HealthChecker
- // RegisterClientHealthCheckListener is used to provide a listener for
- // updates from the client-side health checking service. It returns a
- // function that can be called to stop the health producer.
- RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func()
- // BalancerUnregister is exported by package balancer to unregister a balancer.
- BalancerUnregister func(name string)
- // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
- // default, but tests may wish to set it lower for convenience.
- KeepaliveMinPingTime = 10 * time.Second
- // KeepaliveMinServerPingTime is the minimum ping interval for servers.
- // This must be 1s by default, but tests may wish to set it lower for
- // convenience.
- KeepaliveMinServerPingTime = time.Second
- // ParseServiceConfig parses a JSON representation of the service config.
- ParseServiceConfig any // func(string) *serviceconfig.ParseResult
- // EqualServiceConfigForTesting is for testing service config generation and
- // parsing. Both a and b should be returned by ParseServiceConfig.
- // This function compares the config without rawJSON stripped, in case
- // there's a difference in white space.
- EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
- // GetCertificateProviderBuilder returns the registered builder for the
- // given name. This is set by package certprovider for use from xDS
- // bootstrap code while parsing certificate provider configs in the
- // bootstrap file.
- GetCertificateProviderBuilder any // func(string) certprovider.Builder
- // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
- // stored in the passed in attributes. This is set by
- // credentials/xds/xds.go.
- GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
- // GetServerCredentials returns the transport credentials configured on a
- // gRPC server. An xDS-enabled server needs to know what type of credentials
- // is configured on the underlying gRPC server. This is set by server.go.
- GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
- // MetricsRecorderForServer returns the MetricsRecorderList derived from a
- // server's stats handlers.
- MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder
- // CanonicalString returns the canonical string of the code defined here:
- // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- CanonicalString any // func (codes.Code) string
- // IsRegisteredMethod returns whether the passed in method is registered as
- // a method on the server.
- IsRegisteredMethod any // func(*grpc.Server, string) bool
- // ServerFromContext returns the server from the context.
- ServerFromContext any // func(context.Context) *grpc.Server
- // AddGlobalServerOptions adds an array of ServerOption that will be
- // effective globally for newly created servers. The priority will be: 1.
- // user-provided; 2. this method; 3. default values.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- AddGlobalServerOptions any // func(opt ...ServerOption)
- // ClearGlobalServerOptions clears the array of extra ServerOption. This
- // method is useful in testing and benchmarking.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- ClearGlobalServerOptions func()
- // AddGlobalDialOptions adds an array of DialOption that will be effective
- // globally for newly created client channels. The priority will be: 1.
- // user-provided; 2. this method; 3. default values.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- AddGlobalDialOptions any // func(opt ...DialOption)
- // DisableGlobalDialOptions returns a DialOption that prevents the
- // ClientConn from applying the global DialOptions (set via
- // AddGlobalDialOptions).
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- DisableGlobalDialOptions any // func() grpc.DialOption
- // ClearGlobalDialOptions clears the array of extra DialOption. This
- // method is useful in testing and benchmarking.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- ClearGlobalDialOptions func()
-
- // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
- // configured for newly created ClientConns.
- AddGlobalPerTargetDialOptions any // func (opt any)
- // ClearGlobalPerTargetDialOptions clears the slice of global late apply
- // dial options.
- ClearGlobalPerTargetDialOptions func()
-
- // JoinDialOptions combines the dial options passed as arguments into a
- // single dial option.
- JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
- // JoinServerOptions combines the server options passed as arguments into a
- // single server option.
- JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption
-
- // WithBinaryLogger returns a DialOption that specifies the binary logger
- // for a ClientConn.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption
- // BinaryLogger returns a ServerOption that can set the binary logger for a
- // server.
- //
- // This is used in the 1.0 release of gcp/observability, and thus must not be
- // deleted or changed.
- BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
-
- // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
- // provided grpc.ClientConn.
- SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
-
- // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
- // the provided xds bootstrap config instead of the global configuration from
- // the supported environment variables. The resolver.Builder is meant to be
- // used in conjunction with the grpc.WithResolvers DialOption.
- //
- // Testing Only
- //
- // This function should ONLY be used for testing and may not work with some
- // other features, including the CSDS service.
- NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
-
- // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder
- // using the provided xDS pool instead of creating a new one using the
- // bootstrap configuration specified by the supported environment variables.
- // The resolver.Builder is meant to be used in conjunction with the
- // grpc.WithResolvers DialOption. The resolver.Builder does not take
- // ownership of the provided xDS client and it is the responsibility of the
- // caller to close the client when no longer required.
- //
- // Testing Only
- //
- // This function should ONLY be used for testing and may not work with some
- // other features, including the CSDS service.
- NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error)
-
- // NewXDSResolverWithClientForTesting creates a new xDS resolver builder
- // using the provided xDS client instead of creating a new one using the
- // bootstrap configuration specified by the supported environment variables.
- // The resolver.Builder is meant to be used in conjunction with the
- // grpc.WithResolvers DialOption. The resolver.Builder does not take
- // ownership of the provided xDS client and it is the responsibility of the
- // caller to close the client when no longer required.
- //
- // Testing Only
- //
- // This function should ONLY be used for testing and may not work with some
- // other features, including the CSDS service.
- NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
-
- // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
- // Specifier Plugin for testing purposes, regardless of the XDSRLS environment
- // variable.
- //
- // TODO: Remove this function once the RLS env var is removed.
- RegisterRLSClusterSpecifierPluginForTesting func()
-
- // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
- // Specifier Plugin for testing purposes. This is needed because there is no way
- // to unregister the RLS Cluster Specifier Plugin after registering it solely
- // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
- //
- // TODO: Remove this function once the RLS env var is removed.
- UnregisterRLSClusterSpecifierPluginForTesting func()
-
- // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
- // purposes, regardless of the RBAC environment variable.
- //
- // TODO: Remove this function once the RBAC env var is removed.
- RegisterRBACHTTPFilterForTesting func()
-
- // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
- // testing purposes. This is needed because there is no way to unregister the
- // HTTP Filter after registering it solely for testing purposes using
- // RegisterRBACHTTPFilterForTesting().
- //
- // TODO: Remove this function once the RBAC env var is removed.
- UnregisterRBACHTTPFilterForTesting func()
-
- // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
- ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
-
- // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
- // metadata to RPCs.
- GRPCResolverSchemeExtraMetadata = "xds"
-
- // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
- EnterIdleModeForTesting any // func(*grpc.ClientConn)
-
- // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
- ExitIdleModeForTesting any // func(*grpc.ClientConn) error
-
- // ChannelzTurnOffForTesting disables the Channelz service for testing
- // purposes.
- ChannelzTurnOffForTesting func()
-
- // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
- // invoke resource-not-found error for the given resource type and name.
- TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
-
- // FromOutgoingContextRaw returns the un-merged, intermediary contents of
- // metadata.rawMD.
- FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
-
- // UserSetDefaultScheme is set to true if the user has overridden the
- // default resolver scheme.
- UserSetDefaultScheme = false
-
- // ConnectedAddress returns the connected address for a SubConnState. The
- // address is only valid if the state is READY.
- ConnectedAddress any // func (scs SubConnState) resolver.Address
-
- // SetConnectedAddress sets the connected address for a SubConnState.
- SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
-
- // SnapshotMetricRegistryForTesting snapshots the global data of the metric
- // registry. Returns a cleanup function that sets the metric registry to its
- // original state. Only called in testing functions.
- SnapshotMetricRegistryForTesting func() func()
-
- // SetDefaultBufferPoolForTesting updates the default buffer pool, for
- // testing purposes.
- SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
-
- // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
- // testing purposes.
- SetBufferPoolingThresholdForTesting any // func(int)
-)
-
-// HealthChecker defines the signature of the client-side LB channel health
-// checking function.
-//
-// The implementation is expected to create a health checking RPC stream by
-// calling newStream(), watch for the health status of serviceName, and report
-// its health back by calling setConnectivityState().
-//
-// The health checking protocol is defined at:
-// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
-
-const (
- // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
- CredsBundleModeFallback = "fallback"
- // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
- // mode.
- CredsBundleModeBalancer = "balancer"
- // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
- // that supports backend returned by grpclb balancer.
- CredsBundleModeBackendFromBalancer = "backend-from-balancer"
-)
-
-// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
-//
-// It currently has an experimental suffix which would be removed once
-// end-to-end testing of the policy is completed.
-const RLSLoadBalancingPolicyName = "rls_experimental"
-
-// EnforceSubConnEmbedding is used to enforce proper SubConn implementation
-// embedding.
-type EnforceSubConnEmbedding interface {
- enforceSubConnEmbedding()
-}
-
-// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation
-// embedding.
-type EnforceClientConnEmbedding interface {
- enforceClientConnEmbedding()
-}
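The `any`-typed hooks above are assigned by the implementing package at init time and asserted back to their documented signatures by consumers. A sketch of the consumer side, assuming the hook is wired up as its comment documents (the fallback to `codes.Code.String` is illustrative):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal"
)

// canonicalString asserts the hook back to its documented signature and
// falls back to codes.Code.String if the grpc package has not wired it up.
func canonicalString(c codes.Code) string {
	if f, ok := internal.CanonicalString.(func(codes.Code) string); ok {
		return f(c)
	}
	return c.String()
}

func main() {
	fmt.Println(canonicalString(codes.NotFound))
}
```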
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
deleted file mode 100644
index 900bfb716..000000000
--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package metadata contains functions to set and get metadata from addresses.
-//
-// This package is experimental.
-package metadata
-
-import (
- "fmt"
- "strings"
-
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/resolver"
-)
-
-type mdKeyType string
-
-const mdKey = mdKeyType("grpc.internal.address.metadata")
-
-type mdValue metadata.MD
-
-func (m mdValue) Equal(o any) bool {
- om, ok := o.(mdValue)
- if !ok {
- return false
- }
- if len(m) != len(om) {
- return false
- }
- for k, v := range m {
- ov := om[k]
- if len(ov) != len(v) {
- return false
- }
- for i, ve := range v {
- if ov[i] != ve {
- return false
- }
- }
- }
- return true
-}
-
-// Get returns the metadata of addr.
-func Get(addr resolver.Address) metadata.MD {
- attrs := addr.Attributes
- if attrs == nil {
- return nil
- }
- md, _ := attrs.Value(mdKey).(mdValue)
- return metadata.MD(md)
-}
-
-// Set sets (overrides) the metadata in addr.
-//
-// When a SubConn is created with this address, the RPCs sent on it will all
-// have this metadata.
-func Set(addr resolver.Address, md metadata.MD) resolver.Address {
- addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
- return addr
-}
-
-// Validate validates every pair in md with ValidatePair.
-func Validate(md metadata.MD) error {
- for k, vals := range md {
- if err := ValidatePair(k, vals...); err != nil {
- return err
- }
- }
- return nil
-}
-
-// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E
-func hasNotPrintable(msg string) bool {
- // index by byte to avoid the rune conversion a range loop would perform
- for i := 0; i < len(msg); i++ {
- if msg[i] < 0x20 || msg[i] > 0x7E {
- return true
- }
- }
- return false
-}
-
-// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped):
-//
-// - key must contain one or more characters.
-// - the characters in the key must be contained in [0-9 a-z _ - .].
-// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
-// - the characters in every value must be printable (in [%x20-%x7E]).
-func ValidatePair(key string, vals ...string) error {
- // key should not be empty
- if key == "" {
- return fmt.Errorf("there is an empty key in the header")
- }
- // pseudo-header will be ignored
- if key[0] == ':' {
- return nil
- }
- // check key; index by byte to avoid the rune conversion a range loop would perform
- for i := 0; i < len(key); i++ {
- r := key[i]
- if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
- return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
- }
- }
- if strings.HasSuffix(key, "-bin") {
- return nil
- }
- // check value
- for _, val := range vals {
- if hasNotPrintable(val) {
- return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
- }
- }
- return nil
-}
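A sketch tying the three helpers together; the address, key, and value are made up for illustration:

```go
package main

import (
	"fmt"
	"log"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	md := metadata.Pairs("x-backend-tier", "gold")

	// Keys must be [0-9a-z-_.] and values printable ASCII (unless -bin).
	if err := imetadata.Validate(md); err != nil {
		log.Fatal(err)
	}

	addr := resolver.Address{Addr: "10.0.0.1:443"}
	addr = imetadata.Set(addr, md)

	// RPCs on a SubConn created with addr would all carry this metadata.
	fmt.Println(imetadata.Get(addr).Get("x-backend-tier")) // [gold]
}
```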
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
deleted file mode 100644
index dbee7a60d..000000000
--- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- *
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package pretty defines helper functions to pretty-print structs for logging.
-package pretty
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
-
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/protoadapt"
-)
-
-const jsonIndent = " "
-
-// ToJSON marshals the input into a json string.
-//
-// If marshal fails, it falls back to fmt.Sprintf("%+v").
-func ToJSON(e any) string {
- if ee, ok := e.(protoadapt.MessageV1); ok {
- e = protoadapt.MessageV2Of(ee)
- }
-
- if ee, ok := e.(protoadapt.MessageV2); ok {
- mm := protojson.MarshalOptions{
- Indent: jsonIndent,
- Multiline: true,
- }
- ret, err := mm.Marshal(ee)
- if err != nil {
- // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
- // messages are not imported, and this will fail because the message
- // is not found.
- return fmt.Sprintf("%+v", ee)
- }
- return string(ret)
- }
-
- ret, err := json.MarshalIndent(e, "", jsonIndent)
- if err != nil {
- return fmt.Sprintf("%+v", e)
- }
- return string(ret)
-}
-
-// FormatJSON formats the input json bytes with indentation.
-//
-// If Indent fails, it returns the unchanged input as string.
-func FormatJSON(b []byte) string {
- var out bytes.Buffer
- err := json.Indent(&out, b, "", jsonIndent)
- if err != nil {
- return string(b)
- }
- return out.String()
-}
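A quick sketch of both helpers on plain (non-proto) values:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

func main() {
	// Non-proto values go through encoding/json with two-space indentation.
	fmt.Println(pretty.ToJSON(map[string]int{"replicas": 3}))
	// {
	//   "replicas": 3
	// }

	// Already-serialized JSON can be re-indented after the fact.
	fmt.Println(pretty.FormatJSON([]byte(`{"a":1}`)))
}
```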
diff --git a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
deleted file mode 100644
index 1f61f1a49..000000000
--- a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package proxyattributes contains functions for getting and setting proxy
-// attributes like the CONNECT address and user info.
-package proxyattributes
-
-import (
- "net/url"
-
- "google.golang.org/grpc/resolver"
-)
-
-type keyType string
-
-const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions")
-
-// Options holds the proxy connection details needed during the CONNECT
-// handshake.
-type Options struct {
- User *url.Userinfo
- ConnectAddr string
-}
-
-// Set returns a copy of addr with opts set in its attributes.
-func Set(addr resolver.Address, opts Options) resolver.Address {
- addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts)
- return addr
-}
-
-// Get returns the Options for the proxy [resolver.Address] and a boolean
-// indicating whether the attribute is present. The returned data
-// should not be mutated.
-func Get(addr resolver.Address) (Options, bool) {
- if a := addr.Attributes.Value(proxyOptionsKey); a != nil {
- return a.(Options), true
- }
- return Options{}, false
-}
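A sketch of stashing and reading back the CONNECT details (the addresses and credentials below are illustrative):

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/internal/proxyattributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	addr := resolver.Address{Addr: "backend.example.com:443"}
	addr = proxyattributes.Set(addr, proxyattributes.Options{
		User:        url.User("proxy-user"),   // illustrative credentials
		ConnectAddr: "proxy.example.com:3128", // illustrative proxy address
	})

	if opts, ok := proxyattributes.Get(addr); ok {
		fmt.Println(opts.ConnectAddr) // proxy.example.com:3128
	}
}
```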
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
deleted file mode 100644
index f0603871c..000000000
--- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package resolver provides internal resolver-related functionality.
-package resolver
-
-import (
- "context"
- "sync"
-
- "google.golang.org/grpc/internal/serviceconfig"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/resolver"
-)
-
-// ConfigSelector controls what configuration to use for every RPC.
-type ConfigSelector interface {
- // Selects the configuration for the RPC, or terminates it using the error.
- // This error will be converted by the gRPC library to a status error with
- // code UNKNOWN if it is not returned as a status error.
- SelectConfig(RPCInfo) (*RPCConfig, error)
-}
-
-// RPCInfo contains RPC information needed by a ConfigSelector.
-type RPCInfo struct {
- // Context is the user's context for the RPC and contains headers and
- // application timeout. It is passed for interception purposes and for
- // efficiency reasons. SelectConfig should not be blocking.
- Context context.Context
- Method string // i.e. "/Service/Method"
-}
-
-// RPCConfig describes the configuration to use for each RPC.
-type RPCConfig struct {
- // The context to use for the remainder of the RPC; can pass info to LB
- // policy or affect timeout or metadata.
- Context context.Context
- MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
- OnCommitted func() // Called when the RPC has been committed (retries no longer possible)
- Interceptor ClientInterceptor
-}
-
-// ClientStream is the same as grpc.ClientStream, but defined here for circular
-// dependency reasons.
-type ClientStream interface {
- // Header returns the header metadata received from the server if there
- // is any. It blocks if the metadata is not ready to read.
- Header() (metadata.MD, error)
- // Trailer returns the trailer metadata from the server, if there is any.
- // It must only be called after stream.CloseAndRecv has returned, or
- // stream.Recv has returned a non-nil error (including io.EOF).
- Trailer() metadata.MD
-	// CloseSend closes the send direction of the stream. It closes the stream
-	// when a non-nil error is encountered. It is also not safe to call
-	// CloseSend concurrently with SendMsg.
- CloseSend() error
- // Context returns the context for this stream.
- //
- // It should not be called until after Header or RecvMsg has returned. Once
- // called, subsequent client-side retries are disabled.
- Context() context.Context
- // SendMsg is generally called by generated code. On error, SendMsg aborts
- // the stream. If the error was generated by the client, the status is
- // returned directly; otherwise, io.EOF is returned and the status of
- // the stream may be discovered using RecvMsg.
- //
- // SendMsg blocks until:
- // - There is sufficient flow control to schedule m with the transport, or
- // - The stream is done, or
- // - The stream breaks.
- //
- // SendMsg does not wait until the message is received by the server. An
- // untimely stream closure may result in lost messages. To ensure delivery,
- // users should ensure the RPC completed successfully using RecvMsg.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not safe
- // to call SendMsg on the same stream in different goroutines. It is also
- // not safe to call CloseSend concurrently with SendMsg.
- SendMsg(m any) error
- // RecvMsg blocks until it receives a message into m or the stream is
- // done. It returns io.EOF when the stream completes successfully. On
- // any other error, the stream is aborted and the error contains the RPC
- // status.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not
- // safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m any) error
-}
-
-// ClientInterceptor is an interceptor for gRPC client streams.
-type ClientInterceptor interface {
- // NewStream produces a ClientStream for an RPC which may optionally use
- // the provided function to produce a stream for delegation. Note:
- // RPCInfo.Context should not be used (will be nil).
- //
- // done is invoked when the RPC is finished using its connection, or could
- // not be assigned a connection. RPC operations may still occur on
- // ClientStream after done is called, since the interceptor is invoked by
- // application-layer operations. done must never be nil when called.
- NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
-}
-
-// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
-type ServerInterceptor interface {
-	// AllowRPC checks if an incoming RPC is allowed to proceed based on
-	// information about the connection the RPC was received on and its HTTP
-	// headers. This information will be piped into the context.
- AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
-}
-
-type csKeyType string
-
-const csKey = csKeyType("grpc.internal.resolver.configSelector")
-
-// SetConfigSelector sets the config selector in state and returns the new
-// state.
-func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
- state.Attributes = state.Attributes.WithValue(csKey, cs)
- return state
-}
-
-// GetConfigSelector retrieves the config selector from state, if present, and
-// returns it or nil if absent.
-func GetConfigSelector(state resolver.State) ConfigSelector {
- cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
- return cs
-}
-
-// SafeConfigSelector allows for safe switching of ConfigSelector
-// implementations such that previous values are guaranteed to not be in use
-// when UpdateConfigSelector returns.
-type SafeConfigSelector struct {
- mu sync.RWMutex
- cs ConfigSelector
-}
-
-// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
-// all uses of the previous ConfigSelector have completed.
-func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
- scs.mu.Lock()
- defer scs.mu.Unlock()
- scs.cs = cs
-}
-
-// SelectConfig defers to the current ConfigSelector in scs.
-func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
- scs.mu.RLock()
- defer scs.mu.RUnlock()
- return scs.cs.SelectConfig(r)
-}
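
A sketch of a trivial ConfigSelector and how SafeConfigSelector swaps it in, written as if inside this package with the file's existing imports; defaultConfigSelector and its method config are hypothetical:

    // defaultConfigSelector applies a single MethodConfig to every RPC.
    type defaultConfigSelector struct {
    	mc serviceconfig.MethodConfig
    }

    func (d *defaultConfigSelector) SelectConfig(ri RPCInfo) (*RPCConfig, error) {
    	// Keep the RPC's own context; no interceptor, no OnCommitted hook.
    	return &RPCConfig{Context: ri.Context, MethodConfig: d.mc}, nil
    }

    // Swapping selectors is safe while RPCs are in flight:
    //
    //	var scs SafeConfigSelector
    //	scs.UpdateConfigSelector(&defaultConfigSelector{})
    //	cfg, err := scs.SelectConfig(RPCInfo{Context: ctx, Method: "/Service/Method"})
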
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
deleted file mode 100644
index 7b93f692b..000000000
--- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package delegatingresolver implements a resolver capable of resolving both
-// target URIs and proxy addresses.
-package delegatingresolver
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "sync"
-
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/proxyattributes"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-var (
- logger = grpclog.Component("delegating-resolver")
- // HTTPSProxyFromEnvironment will be overwritten in the tests
- HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
-)
-
-// delegatingResolver manages both target URI and proxy address resolution by
-// delegating these tasks to separate child resolvers. Essentially, it acts as
-// an intermediary between the gRPC ClientConn and the child resolvers.
-//
-// It implements the [resolver.Resolver] interface.
-type delegatingResolver struct {
- target resolver.Target // parsed target URI to be resolved
- cc resolver.ClientConn // gRPC ClientConn
- proxyURL *url.URL // proxy URL, derived from proxy environment and target
-
- mu sync.Mutex // protects all the fields below
- targetResolverState *resolver.State // state of the target resolver
- proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured
-
- // childMu serializes calls into child resolvers. It also protects access to
- // the following fields.
- childMu sync.Mutex
- targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
- proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
-}
-
-// nopResolver is a resolver that does nothing.
-type nopResolver struct{}
-
-func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
-
-func (nopResolver) Close() {}
-
-// proxyURLForTarget determines the proxy URL for the given address based on
-// the environment. It can return the following:
-//   - nil URL, nil error: no proxy is configured, or the address is excluded
-//     via the `NO_PROXY` environment variable, or req.URL.Host is
-//     "localhost" (with or without a port number).
-// - nil URL, non-nil error: An error occurred while retrieving the proxy URL.
-// - non-nil URL, nil error: A proxy is configured, and the proxy URL was
-// retrieved successfully without any errors.
-func proxyURLForTarget(address string) (*url.URL, error) {
- req := &http.Request{URL: &url.URL{
- Scheme: "https",
- Host: address,
- }}
- return HTTPSProxyFromEnvironment(req)
-}
-
-// New creates a new delegating resolver that can create up to two child
-// resolvers:
-// - one to resolve the proxy address specified using the supported
-// environment variables. This uses the registered resolver for the "dns"
-// scheme.
-// - one to resolve the target URI using the resolver specified by the scheme
-// in the target URI or specified by the user using the WithResolvers dial
-// option. As a special case, if the target URI's scheme is "dns" and a
-// proxy is specified using the supported environment variables, the target
-// URI's path portion is used as the resolved address unless target
-// resolution is enabled using the dial option.
-func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
- r := &delegatingResolver{
- target: target,
- cc: cc,
- }
-
- var err error
- r.proxyURL, err = proxyURLForTarget(target.Endpoint())
- if err != nil {
- return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
- }
-
-	// If no proxy is configured, or the proxy address is excluded via the
-	// `NO_PROXY` env var, only the target resolver is used.
- if r.proxyURL == nil {
- return targetResolverBuilder.Build(target, cc, opts)
- }
-
- if logger.V(2) {
-		logger.Infof("Proxy URL detected: %s", r.proxyURL)
- }
-
- // Resolver updates from one child may trigger calls into the other. Block
- // updates until the children are initialized.
- r.childMu.Lock()
- defer r.childMu.Unlock()
- // When the scheme is 'dns' and target resolution on client is not enabled,
- // resolution should be handled by the proxy, not the client. Therefore, we
- // bypass the target resolver and store the unresolved target address.
- if target.URL.Scheme == "dns" && !targetResolutionEnabled {
- state := resolver.State{
- Addresses: []resolver.Address{{Addr: target.Endpoint()}},
- Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
- }
- r.targetResolverState = &state
- } else {
- wcc := &wrappingClientConn{
- stateListener: r.updateTargetResolverState,
- parent: r,
- }
- if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
- return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
- }
- }
-
- if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil {
- return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err)
- }
-
- if r.targetResolver == nil {
- r.targetResolver = nopResolver{}
- }
- if r.proxyResolver == nil {
- r.proxyResolver = nopResolver{}
- }
- return r, nil
-}
-
-// proxyURIResolver creates a resolver for resolving proxy URIs using the
-// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and
-// builds a resolver with a wrappingClientConn to capture resolved addresses.
-func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
- proxyBuilder := resolver.Get("dns")
- if proxyBuilder == nil {
- panic("delegating_resolver: resolver for proxy not found for scheme dns")
- }
- url := *r.proxyURL
- url.Scheme = "dns"
- url.Path = "/" + r.proxyURL.Host
- url.Host = "" // Clear the Host field to conform to the "dns:///" format
-
- proxyTarget := resolver.Target{URL: url}
- wcc := &wrappingClientConn{
- stateListener: r.updateProxyResolverState,
- parent: r,
- }
- return proxyBuilder.Build(proxyTarget, wcc, opts)
-}
-
-func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
- r.childMu.Lock()
- defer r.childMu.Unlock()
- r.targetResolver.ResolveNow(o)
- r.proxyResolver.ResolveNow(o)
-}
-
-func (r *delegatingResolver) Close() {
- r.childMu.Lock()
- defer r.childMu.Unlock()
- r.targetResolver.Close()
- r.targetResolver = nil
-
- r.proxyResolver.Close()
- r.proxyResolver = nil
-}
-
-// updateClientConnStateLocked creates a list of combined addresses by
-// pairing each proxy address with every target address. For each pair, it
-// generates a new [resolver.Address] from the proxy address, attaching the
-// target address and user info as attributes. It returns nil until both
-// resolvers have sent at least one update, and thereafter returns the error
-// from the ClientConn update.
-func (r *delegatingResolver) updateClientConnStateLocked() error {
- if r.targetResolverState == nil || r.proxyAddrs == nil {
- return nil
- }
-
- curState := *r.targetResolverState
- // If multiple resolved proxy addresses are present, we send only the
- // unresolved proxy host and let net.Dial handle the proxy host name
- // resolution when creating the transport. Sending all resolved addresses
- // would increase the number of addresses passed to the ClientConn and
- // subsequently to load balancing (LB) policies like Round Robin, leading
- // to additional TCP connections. However, if there's only one resolved
- // proxy address, we send it directly, as it doesn't affect the address
- // count returned by the target resolver and the address count sent to the
- // ClientConn.
- var proxyAddr resolver.Address
- if len(r.proxyAddrs) == 1 {
- proxyAddr = r.proxyAddrs[0]
- } else {
- proxyAddr = resolver.Address{Addr: r.proxyURL.Host}
- }
- var addresses []resolver.Address
- for _, targetAddr := range (*r.targetResolverState).Addresses {
- addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
- User: r.proxyURL.User,
- ConnectAddr: targetAddr.Addr,
- }))
- }
-
-	// Create a list of combined endpoints by pairing every proxy address with
-	// every target endpoint. For each target endpoint, a new
-	// [resolver.Endpoint] is constructed from all the proxy addresses, with
-	// the target address and the user info from the proxy URL attached to
-	// each proxy address as attributes. The resulting endpoints cover all
-	// combinations of proxy addresses and target endpoints.
- var endpoints []resolver.Endpoint
- for _, endpt := range (*r.targetResolverState).Endpoints {
- var addrs []resolver.Address
- for _, proxyAddr := range r.proxyAddrs {
- for _, targetAddr := range endpt.Addresses {
- addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
- User: r.proxyURL.User,
- ConnectAddr: targetAddr.Addr,
- }))
- }
- }
- endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs})
- }
- // Use the targetResolverState for its service config and attributes
- // contents. The state update is only sent after both the target and proxy
- // resolvers have sent their updates, and curState has been updated with
- // the combined addresses.
- curState.Addresses = addresses
- curState.Endpoints = endpoints
- return r.cc.UpdateState(curState)
-}
-
-// updateProxyResolverState updates the proxy resolver state by storing proxy
-// addresses and endpoints, marking the resolver as ready, and triggering a
-// state update if both proxy and target resolvers are ready. If the ClientConn
-// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It
-// is a StateListener function of wrappingClientConn passed to the proxy resolver.
-func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if logger.V(2) {
- logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
- }
- if len(state.Endpoints) > 0 {
- // We expect exactly one address per endpoint because the proxy
- // resolver uses "dns" resolution.
- r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
- for _, endpoint := range state.Endpoints {
- r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
- }
- } else if state.Addresses != nil {
- r.proxyAddrs = state.Addresses
- } else {
- r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received
- }
- err := r.updateClientConnStateLocked()
-	// An alternative approach would be to block until updates are received
-	// from both resolvers, but that is not done here: calling `New()`
-	// triggers `Build()` for the first resolver, which calls `UpdateState()`
-	// before the second resolver has sent an update, so `New()` would block
-	// indefinitely.
- if err != nil {
- go func() {
- r.childMu.Lock()
- defer r.childMu.Unlock()
- if r.targetResolver != nil {
- r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
- }
- }()
- }
- return err
-}
-
-// updateTargetResolverState updates the target resolver state by storing target
-// addresses, endpoints, and service config, marking the resolver as ready, and
-// triggering a state update if both resolvers are ready. If the ClientConn
-// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It
-// is a StateListener function of wrappingClientConn passed to the target resolver.
-func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if logger.V(2) {
- logger.Infof("Addresses received from target resolver: %v", state.Addresses)
- }
- r.targetResolverState = &state
- err := r.updateClientConnStateLocked()
- if err != nil {
- go func() {
- r.childMu.Lock()
- defer r.childMu.Unlock()
- if r.proxyResolver != nil {
- r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
- }
- }()
- }
- return nil
-}
-
-// wrappingClientConn serves as an intermediary between the parent ClientConn
-// and the child resolvers created here. It implements the resolver.ClientConn
-// interface and is passed in that capacity to the child resolvers.
-type wrappingClientConn struct {
- // Callback to deliver resolver state updates
- stateListener func(state resolver.State) error
- parent *delegatingResolver
-}
-
-// UpdateState receives resolver state updates and forwards them to the
-// appropriate listener function (either for the proxy or target resolver).
-func (wcc *wrappingClientConn) UpdateState(state resolver.State) error {
- return wcc.stateListener(state)
-}
-
-// ReportError intercepts errors from the child resolvers and passes them to ClientConn.
-func (wcc *wrappingClientConn) ReportError(err error) {
- wcc.parent.cc.ReportError(err)
-}
-
-// NewAddress intercepts the newly resolved addresses from the child resolvers
-// and passes them to the ClientConn.
-func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) {
- wcc.UpdateState(resolver.State{Addresses: addrs})
-}
-
-// ParseServiceConfig parses the provided service config and returns an
-// object that provides the parsed config.
-func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
- return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON)
-}
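
To make the address pairing in updateClientConnStateLocked concrete, a small fragment (same internal-import caveat; the addresses are invented): one unresolved proxy host combined with two resolved target addresses yields two ClientConn addresses that both dial the proxy but CONNECT to different backends.

    proxyAddr := resolver.Address{Addr: "proxy.example.com:3128"}
    targets := []string{"10.0.0.1:443", "10.0.0.2:443"}
    var combined []resolver.Address
    for _, t := range targets {
    	combined = append(combined, proxyattributes.Set(proxyAddr, proxyattributes.Options{
    		ConnectAddr: t, // the transport issues CONNECT to this address
    	}))
    }
    // len(combined) == 2; both entries carry Addr "proxy.example.com:3128".
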
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
deleted file mode 100644
index ba5c5a95d..000000000
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package dns implements a dns resolver to be installed as the default resolver
-// in grpc.
-package dns
-
-import (
- "context"
- "encoding/json"
- "fmt"
- rand "math/rand/v2"
- "net"
- "net/netip"
- "os"
- "strconv"
- "strings"
- "sync"
- "time"
-
- grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/resolver/dns/internal"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-var (
- // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
- // addresses from SRV records. Must not be changed after init time.
- EnableSRVLookups = false
-
- // MinResolutionInterval is the minimum interval at which re-resolutions are
- // allowed. This helps to prevent excessive re-resolution.
- MinResolutionInterval = 30 * time.Second
-
- // ResolvingTimeout specifies the maximum duration for a DNS resolution request.
- // If the timeout expires before a response is received, the request will be canceled.
- //
- // It is recommended to set this value at application startup. Avoid modifying this variable
- // after initialization as it's not thread-safe for concurrent modification.
- ResolvingTimeout = 30 * time.Second
-
- logger = grpclog.Component("dns")
-)
-
-func init() {
- resolver.Register(NewBuilder())
- internal.TimeAfterFunc = time.After
- internal.TimeNowFunc = time.Now
- internal.TimeUntilFunc = time.Until
- internal.NewNetResolver = newNetResolver
- internal.AddressDialer = addressDialer
-}
-
-const (
- defaultPort = "443"
- defaultDNSSvrPort = "53"
- golang = "GO"
- // txtPrefix is the prefix string to be prepended to the host name for txt
- // record lookup.
- txtPrefix = "_grpc_config."
- // In DNS, service config is encoded in a TXT record via the mechanism
- // described in RFC-1464 using the attribute name grpc_config.
- txtAttribute = "grpc_config="
-)
-
-var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
- return func(ctx context.Context, network, _ string) (net.Conn, error) {
- var dialer net.Dialer
- return dialer.DialContext(ctx, network, address)
- }
-}
-
-var newNetResolver = func(authority string) (internal.NetResolver, error) {
- if authority == "" {
- return net.DefaultResolver, nil
- }
-
- host, port, err := parseTarget(authority, defaultDNSSvrPort)
- if err != nil {
- return nil, err
- }
-
- authorityWithPort := net.JoinHostPort(host, port)
-
- return &net.Resolver{
- PreferGo: true,
- Dial: internal.AddressDialer(authorityWithPort),
- }, nil
-}
-
-// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
-func NewBuilder() resolver.Builder {
- return &dnsBuilder{}
-}
-
-type dnsBuilder struct{}
-
-// Build creates and starts a DNS resolver that watches the name resolution of
-// the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
- host, port, err := parseTarget(target.Endpoint(), defaultPort)
- if err != nil {
- return nil, err
- }
-
- // IP address.
- if ipAddr, err := formatIP(host); err == nil {
- addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
- cc.UpdateState(resolver.State{Addresses: addr})
- return deadResolver{}, nil
- }
-
- // DNS address (non-IP).
- ctx, cancel := context.WithCancel(context.Background())
- d := &dnsResolver{
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- cc: cc,
- rn: make(chan struct{}, 1),
- disableServiceConfig: opts.DisableServiceConfig,
- }
-
- d.resolver, err = internal.NewNetResolver(target.URL.Host)
- if err != nil {
- return nil, err
- }
-
- d.wg.Add(1)
- go d.watcher()
- return d, nil
-}
-
-// Scheme returns the naming scheme of this resolver builder, which is "dns".
-func (b *dnsBuilder) Scheme() string {
- return "dns"
-}
-
-// deadResolver is a resolver that does nothing.
-type deadResolver struct{}
-
-func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
-
-func (deadResolver) Close() {}
-
-// dnsResolver watches for the name resolution update for a non-IP target.
-type dnsResolver struct {
- host string
- port string
- resolver internal.NetResolver
- ctx context.Context
- cancel context.CancelFunc
- cc resolver.ClientConn
- // rn channel is used by ResolveNow() to force an immediate resolution of the
- // target.
- rn chan struct{}
-	// wg is used to ensure that Close() returns only after the watcher()
-	// goroutine has finished; otherwise a data race is possible. [Race
-	// example] In dns_resolver_test we replace the real lookup functions with
-	// mocked ones to facilitate testing. If Close() doesn't wait for the
-	// watcher() goroutine to finish, the race detector sometimes warns that
-	// the lookup inside watcher() (which READs the lookup function pointers)
-	// races with replaceNetFunc (which WRITEs the lookup function pointers).
- wg sync.WaitGroup
- disableServiceConfig bool
-}
-
-// ResolveNow invokes an immediate resolution of the target that this
-// dnsResolver watches.
-func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
- select {
- case d.rn <- struct{}{}:
- default:
- }
-}
-
-// Close closes the dnsResolver.
-func (d *dnsResolver) Close() {
- d.cancel()
- d.wg.Wait()
-}
-
-func (d *dnsResolver) watcher() {
- defer d.wg.Done()
- backoffIndex := 1
- for {
- state, err := d.lookup()
- if err != nil {
- // Report error to the underlying grpc.ClientConn.
- d.cc.ReportError(err)
- } else {
- err = d.cc.UpdateState(*state)
- }
-
- var nextResolutionTime time.Time
-			// Resolution succeeded; wait for the next ResolveNow call. However,
-			// also wait at least MinResolutionInterval to prevent constantly
-			// re-resolving.
- backoffIndex = 1
- nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
- select {
- case <-d.ctx.Done():
- return
- case <-d.rn:
- }
- } else {
- // Poll on an error found in DNS Resolver or an error received from
- // ClientConn.
- nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
- backoffIndex++
- }
- select {
- case <-d.ctx.Done():
- return
- case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
- }
- }
-}
-
-func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
- // Skip this particular host to avoid timeouts with some versions of
- // systemd-resolved.
- if !EnableSRVLookups || d.host == "metadata.google.internal." {
- return nil, nil
- }
- var newAddrs []resolver.Address
- _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
- if err != nil {
- err = handleDNSError(err, "SRV") // may become nil
- return nil, err
- }
- for _, s := range srvs {
- lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
- if err != nil {
- err = handleDNSError(err, "A") // may become nil
- if err == nil {
- // If there are other SRV records, look them up and ignore this
- // one that does not exist.
- continue
- }
- return nil, err
- }
- for _, a := range lbAddrs {
- ip, err := formatIP(a)
- if err != nil {
- return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
- }
- addr := ip + ":" + strconv.Itoa(int(s.Port))
- newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
- }
- }
- return newAddrs, nil
-}
-
-func handleDNSError(err error, lookupType string) error {
- dnsErr, ok := err.(*net.DNSError)
- if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
- // Timeouts and temporary errors should be communicated to gRPC to
- // attempt another DNS query (with backoff). Other errors should be
- // suppressed (they may represent the absence of a TXT record).
- return nil
- }
- if err != nil {
- err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
- logger.Info(err)
- }
- return err
-}
-
-func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
- ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
- if err != nil {
- if envconfig.TXTErrIgnore {
- return nil
- }
- if err = handleDNSError(err, "TXT"); err != nil {
- return &serviceconfig.ParseResult{Err: err}
- }
- return nil
- }
- var res string
- for _, s := range ss {
- res += s
- }
-
- // TXT record must have "grpc_config=" attribute in order to be used as
- // service config.
- if !strings.HasPrefix(res, txtAttribute) {
- logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
- // This is not an error; it is the equivalent of not having a service
- // config.
- return nil
- }
- sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
- return d.cc.ParseServiceConfig(sc)
-}
-
-func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) {
- addrs, err := d.resolver.LookupHost(ctx, d.host)
- if err != nil {
- err = handleDNSError(err, "A")
- return nil, err
- }
- newAddrs := make([]resolver.Address, 0, len(addrs))
- for _, a := range addrs {
- ip, err := formatIP(a)
- if err != nil {
- return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
- }
- addr := ip + ":" + d.port
- newAddrs = append(newAddrs, resolver.Address{Addr: addr})
- }
- return newAddrs, nil
-}
-
-func (d *dnsResolver) lookup() (*resolver.State, error) {
- ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout)
- defer cancel()
- srv, srvErr := d.lookupSRV(ctx)
- addrs, hostErr := d.lookupHost(ctx)
- if hostErr != nil && (srvErr != nil || len(srv) == 0) {
- return nil, hostErr
- }
-
- state := resolver.State{Addresses: addrs}
- if len(srv) > 0 {
- state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
- }
- if !d.disableServiceConfig {
- state.ServiceConfig = d.lookupTXT(ctx)
- }
- return &state, nil
-}
-
-// formatIP returns an error if addr is not a valid textual representation of
-// an IP address. If addr is an IPv4 address, it returns addr with a nil
-// error. If addr is an IPv6 address, it returns addr enclosed in square
-// brackets with a nil error.
-func formatIP(addr string) (string, error) {
- ip, err := netip.ParseAddr(addr)
- if err != nil {
- return "", err
- }
- if ip.Is4() {
- return addr, nil
- }
- return "[" + addr + "]", nil
-}
-
-// parseTarget takes the user's input target string and a default port, and
-// returns the formatted host and port. If target doesn't specify a port, the
-// port is set to defaultPort. If target is an IPv6 address enclosed in square
-// brackets, the brackets are stripped when setting the host.
-// examples:
-// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
-// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
-func parseTarget(target, defaultPort string) (host, port string, err error) {
- if target == "" {
- return "", "", internal.ErrMissingAddr
- }
- if _, err := netip.ParseAddr(target); err == nil {
- // target is an IPv4 or IPv6(without brackets) address
- return target, defaultPort, nil
- }
- if host, port, err = net.SplitHostPort(target); err == nil {
- if port == "" {
- // If the port field is empty (target ends with colon), e.g. "[::1]:",
- // this is an error.
- return "", "", internal.ErrEndsWithColon
- }
-		// target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
- if host == "" {
- // Keep consistent with net.Dial(): If the host is empty, as in ":80",
- // the local system is assumed.
- host = "localhost"
- }
- return host, port, nil
- }
- if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
- // target doesn't have port
- return host, port, nil
- }
- return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
-}
-
-type rawChoice struct {
- ClientLanguage *[]string `json:"clientLanguage,omitempty"`
- Percentage *int `json:"percentage,omitempty"`
- ClientHostName *[]string `json:"clientHostName,omitempty"`
- ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
-}
-
-func containsString(a *[]string, b string) bool {
- if a == nil {
- return true
- }
- for _, c := range *a {
- if c == b {
- return true
- }
- }
- return false
-}
-
-func chosenByPercentage(a *int) bool {
- if a == nil {
- return true
- }
- return rand.IntN(100)+1 <= *a
-}
-
-func canaryingSC(js string) string {
- if js == "" {
- return ""
- }
- var rcs []rawChoice
- err := json.Unmarshal([]byte(js), &rcs)
- if err != nil {
- logger.Warningf("dns: error parsing service config json: %v", err)
- return ""
- }
- cliHostname, err := os.Hostname()
- if err != nil {
- logger.Warningf("dns: error getting client hostname: %v", err)
- return ""
- }
- var sc string
- for _, c := range rcs {
- if !containsString(c.ClientLanguage, golang) ||
- !chosenByPercentage(c.Percentage) ||
- !containsString(c.ClientHostName, cliHostname) ||
- c.ServiceConfig == nil {
- continue
- }
- sc = string(*c.ServiceConfig)
- break
- }
- return sc
-}
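
The TXT-record canarying above is easiest to see with data. A standalone sketch of the choice format (the struct mirrors rawChoice; the service-config payload is invented):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // choice mirrors rawChoice from the resolver above.
    type choice struct {
    	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
    	Percentage     *int             `json:"percentage,omitempty"`
    	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
    }

    func main() {
    	// What a TXT record value looks like after stripping "grpc_config=".
    	txt := `[{"clientLanguage":["GO"],"percentage":100,` +
    		`"serviceConfig":{"loadBalancingConfig":[{"round_robin":{}}]}}]`
    	var cs []choice
    	if err := json.Unmarshal([]byte(txt), &cs); err != nil {
    		panic(err)
    	}
    	// With percentage 100 the random gate always passes, so the first
    	// matching choice's serviceConfig is selected.
    	fmt.Println(string(*cs[0].ServiceConfig))
    }
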
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
deleted file mode 100644
index c0eae4f5f..000000000
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains functionality internal to the dns resolver package.
-package internal
-
-import (
- "context"
- "errors"
- "net"
- "time"
-)
-
-// NetResolver groups the methods on net.Resolver that are used by the DNS
-// resolver implementation. This allows the default net.Resolver instance to be
-// overridden from tests.
-type NetResolver interface {
- LookupHost(ctx context.Context, host string) (addrs []string, err error)
- LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
- LookupTXT(ctx context.Context, name string) (txts []string, err error)
-}
-
-var (
- // ErrMissingAddr is the error returned when building a DNS resolver when
- // the provided target name is empty.
- ErrMissingAddr = errors.New("dns resolver: missing address")
-
- // ErrEndsWithColon is the error returned when building a DNS resolver when
- // the provided target name ends with a colon that is supposed to be the
-	// separator between host and port. E.g. "::" is a valid address as it is
-	// an IPv6 address (host only), while "[::]:" is invalid as it ends with a
-	// colon that acts as the host and port separator.
- ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
-
-// The following vars are overridden from tests.
-var (
- // TimeAfterFunc is used by the DNS resolver to wait for the given duration
- // to elapse. In non-test code, this is implemented by time.After. In test
- // code, this can be used to control the amount of time the resolver is
- // blocked waiting for the duration to elapse.
- TimeAfterFunc func(time.Duration) <-chan time.Time
-
- // TimeNowFunc is used by the DNS resolver to get the current time.
- // In non-test code, this is implemented by time.Now. In test code,
- // this can be used to control the current time for the resolver.
- TimeNowFunc func() time.Time
-
- // TimeUntilFunc is used by the DNS resolver to calculate the remaining
- // wait time for re-resolution. In non-test code, this is implemented by
- // time.Until. In test code, this can be used to control the remaining
- // time for resolver to wait for re-resolution.
- TimeUntilFunc func(time.Time) time.Duration
-
- // NewNetResolver returns the net.Resolver instance for the given target.
- NewNetResolver func(string) (NetResolver, error)
-
- // AddressDialer is the dialer used to dial the DNS server. It accepts the
- // Host portion of the URL corresponding to the user's dial target and
- // returns a dial function.
- AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
-)
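
A fragment sketching how a test inside grpc-go might use these hooks to make the resolver's re-resolution timer deterministic (the test name and channel variable are invented; imports of testing, time, and this internal package are assumed):

    func TestDeterministicTimer(t *testing.T) {
    	timerCh := make(chan time.Time, 1)
    	origAfter := internal.TimeAfterFunc
    	internal.TimeAfterFunc = func(time.Duration) <-chan time.Time {
    		return timerCh // ignore the duration; fire only when the test says so
    	}
    	t.Cleanup(func() { internal.TimeAfterFunc = origAfter })

    	// ... build a dns resolver here, then release its wait on demand:
    	timerCh <- time.Now()
    }
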
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
deleted file mode 100644
index b901c7bac..000000000
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package passthrough implements a pass-through resolver. It sends the target
-// name without the scheme back to gRPC as the resolved address.
-package passthrough
-
-import (
- "errors"
-
- "google.golang.org/grpc/resolver"
-)
-
-const scheme = "passthrough"
-
-type passthroughBuilder struct{}
-
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
- if target.Endpoint() == "" && opts.Dialer == nil {
- return nil, errors.New("passthrough: received empty target in Build()")
- }
- r := &passthroughResolver{
- target: target,
- cc: cc,
- }
- r.start()
- return r, nil
-}
-
-func (*passthroughBuilder) Scheme() string {
- return scheme
-}
-
-type passthroughResolver struct {
- target resolver.Target
- cc resolver.ClientConn
-}
-
-func (r *passthroughResolver) start() {
- r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
-}
-
-func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
-
-func (*passthroughResolver) Close() {}
-
-func init() {
- resolver.Register(&passthroughBuilder{})
-}
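
Usage is implicit via the target scheme. A dialing sketch, assuming a recent grpc-go that provides grpc.NewClient (the server address is invented):

    // The endpoint is handed to the ClientConn verbatim — no DNS lookup.
    conn, err := grpc.NewClient("passthrough:///localhost:50051",
    	grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
    	log.Fatal(err)
    }
    defer conn.Close()
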
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
deleted file mode 100644
index 27cd81af9..000000000
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package unix implements a resolver for unix targets.
-package unix
-
-import (
- "fmt"
-
- "google.golang.org/grpc/internal/transport/networktype"
- "google.golang.org/grpc/resolver"
-)
-
-const unixScheme = "unix"
-const unixAbstractScheme = "unix-abstract"
-
-type builder struct {
- scheme string
-}
-
-func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
- if target.URL.Host != "" {
- return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
- }
-
- // gRPC was parsing the dial target manually before PR #4817, and we
- // switched to using url.Parse() in that PR. To avoid breaking existing
- // resolver implementations we ended up stripping the leading "/" from the
- // endpoint. This obviously does not work for the "unix" scheme. Hence we
- // end up using the parsed URL instead.
- endpoint := target.URL.Path
- if endpoint == "" {
- endpoint = target.URL.Opaque
- }
- addr := resolver.Address{Addr: endpoint}
- if b.scheme == unixAbstractScheme {
-		// We cannot prepend \0 as C++ gRPC does, because in Go a leading '@'
-		// is used to signify an abstract socket address without a trailing \0.
- addr.Addr = "@" + addr.Addr
- }
- cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
- return &nopResolver{}, nil
-}
-
-func (b *builder) Scheme() string {
- return b.scheme
-}
-
-func (b *builder) OverrideAuthority(resolver.Target) string {
- return "localhost"
-}
-
-type nopResolver struct {
-}
-
-func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
-
-func (*nopResolver) Close() {}
-
-func init() {
- resolver.Register(&builder{scheme: unixScheme})
- resolver.Register(&builder{scheme: unixAbstractScheme})
-}
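
The two schemes map onto addresses as sketched below (socket paths invented); the '@' form is what the abstract-socket branch above produces:

    // "unix:///tmp/grpc.sock"     → Address{Addr: "/tmp/grpc.sock"}
    // "unix-abstract:server-name" → Address{Addr: "@server-name"}
    conn, err := grpc.NewClient("unix:///tmp/grpc.sock",
    	grpc.WithTransportCredentials(insecure.NewCredentials()))
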
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
deleted file mode 100644
index 11d82afcc..000000000
--- a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package serviceconfig
-
-import (
- "encoding/json"
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-// Duration defines JSON marshal and unmarshal methods to conform to the
-// protobuf JSON spec defined [here].
-//
-// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
-type Duration time.Duration
-
-func (d Duration) String() string {
- return fmt.Sprint(time.Duration(d))
-}
-
-// MarshalJSON converts from d to a JSON string output.
-func (d Duration) MarshalJSON() ([]byte, error) {
- ns := time.Duration(d).Nanoseconds()
- sec := ns / int64(time.Second)
- ns = ns % int64(time.Second)
-
- var sign string
- if sec < 0 || ns < 0 {
- sign, sec, ns = "-", -1*sec, -1*ns
- }
-
- // Generated output always contains 0, 3, 6, or 9 fractional digits,
- // depending on required precision.
- str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
- str = strings.TrimSuffix(str, "000")
- str = strings.TrimSuffix(str, "000")
- str = strings.TrimSuffix(str, ".000")
- return []byte(fmt.Sprintf("\"%ss\"", str)), nil
-}
-
-// UnmarshalJSON unmarshals b as a duration JSON string into d.
-func (d *Duration) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- if !strings.HasSuffix(s, "s") {
- return fmt.Errorf("malformed duration %q: missing seconds unit", s)
- }
- neg := false
- if s[0] == '-' {
- neg = true
- s = s[1:]
- }
- ss := strings.SplitN(s[:len(s)-1], ".", 3)
- if len(ss) > 2 {
- return fmt.Errorf("malformed duration %q: too many decimals", s)
- }
- // hasDigits is set if either the whole or fractional part of the number is
- // present, since both are optional but one is required.
- hasDigits := false
- var sec, ns int64
- if len(ss[0]) > 0 {
- var err error
- if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
- return fmt.Errorf("malformed duration %q: %v", s, err)
- }
- // Maximum seconds value per the durationpb spec.
- const maxProtoSeconds = 315_576_000_000
- if sec > maxProtoSeconds {
- return fmt.Errorf("out of range: %q", s)
- }
- hasDigits = true
- }
- if len(ss) == 2 && len(ss[1]) > 0 {
- if len(ss[1]) > 9 {
- return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
- }
- var err error
- if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
- return fmt.Errorf("malformed duration %q: %v", s, err)
- }
- for i := 9; i > len(ss[1]); i-- {
- ns *= 10
- }
- hasDigits = true
- }
- if !hasDigits {
- return fmt.Errorf("malformed duration %q: contains no numbers", s)
- }
-
- if neg {
- sec *= -1
- ns *= -1
- }
-
- // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
- const maxSeconds = math.MaxInt64 / int64(time.Second)
- const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
- const minSeconds = math.MinInt64 / int64(time.Second)
- const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
-
- if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
- *d = Duration(math.MaxInt64)
- } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
- *d = Duration(math.MinInt64)
- } else {
- *d = Duration(sec*int64(time.Second) + ns)
- }
- return nil
-}
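
Round-trip behavior of the marshaling above, as a sketch (internal import path, so only compilable inside grpc-go):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"

    	"google.golang.org/grpc/internal/serviceconfig"
    )

    func main() {
    	// Fractional digits are emitted in groups of three, then trimmed:
    	b, _ := json.Marshal(serviceconfig.Duration(1500 * time.Millisecond))
    	fmt.Println(string(b)) // "1.500s"

    	var d serviceconfig.Duration
    	_ = json.Unmarshal([]byte(`"0.1s"`), &d)
    	fmt.Println(time.Duration(d)) // 100ms
    }
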
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
deleted file mode 100644
index 51e733e49..000000000
--- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package serviceconfig contains utility functions to parse service config.
-package serviceconfig
-
-import (
- "encoding/json"
- "fmt"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- externalserviceconfig "google.golang.org/grpc/serviceconfig"
-)
-
-var logger = grpclog.Component("core")
-
-// BalancerConfig wraps the name and config associated with one load balancing
-// policy. It corresponds to a single entry of the loadBalancingConfig field
-// from ServiceConfig.
-//
-// It implements the json.Unmarshaler interface.
-//
-// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
-type BalancerConfig struct {
- Name string
- Config externalserviceconfig.LoadBalancingConfig
-}
-
-type intermediateBalancerConfig []map[string]json.RawMessage
-
-// MarshalJSON implements the json.Marshaler interface.
-//
-// It marshals the balancer and config into a length-1 slice
-// ([]map[string]config).
-func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
- if bc.Config == nil {
- // If config is nil, return empty config `{}`.
- return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
- }
- c, err := json.Marshal(bc.Config)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-//
-// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
-// config. This method iterates through that list in order, and stops at the
-// first policy that is supported.
-// - If the config for the first supported policy is invalid, the whole service
-// config is invalid.
-// - If the list doesn't contain any supported policy, the whole service config
-// is invalid.
-func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
- var ir intermediateBalancerConfig
- err := json.Unmarshal(b, &ir)
- if err != nil {
- return err
- }
-
- var names []string
- for i, lbcfg := range ir {
- if len(lbcfg) != 1 {
- return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
- }
-
- var (
- name string
- jsonCfg json.RawMessage
- )
- // Get the key:value pair from the map. We have already made sure that
- // the map contains a single entry.
- for name, jsonCfg = range lbcfg {
- }
-
- names = append(names, name)
- builder := balancer.Get(name)
- if builder == nil {
- // If the balancer is not registered, move on to the next config.
- // This is not an error.
- continue
- }
- bc.Name = name
-
- parser, ok := builder.(balancer.ConfigParser)
- if !ok {
- if string(jsonCfg) != "{}" {
- logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
- }
-			// Stop here, even though the builder doesn't support parsing config.
- return nil
- }
-
- cfg, err := parser.ParseConfig(jsonCfg)
- if err != nil {
- return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
- }
- bc.Config = cfg
- return nil
- }
- // This is reached when the for loop iterates over all entries, but didn't
- // return. This means we had a loadBalancingConfig slice but did not
- // encounter a registered policy. The config is considered invalid in this
- // case.
- return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
-}
-
-// MethodConfig defines the configuration recommended by the service providers for a
-// particular method.
-type MethodConfig struct {
- // WaitForReady indicates whether RPCs sent to this method should wait until
- // the connection is ready by default (!failfast). The value specified via the
- // gRPC client API will override the value set here.
- WaitForReady *bool
- // Timeout is the default timeout for RPCs sent to this method. The actual
- // deadline used will be the minimum of the value specified here and the value
- // set by the application via the gRPC client API. If either one is not set,
- // then the other will be used. If neither is set, then the RPC has no deadline.
- Timeout *time.Duration
- // MaxReqSize is the maximum allowed payload size for an individual request in a
- // stream (client->server) in bytes. The size which is measured is the serialized
- // payload after per-message compression (but before stream compression) in bytes.
- // The actual value used is the minimum of the value specified here and the value set
- // by the application via the gRPC client API. If either one is not set, then the other
- // will be used. If neither is set, then the built-in default is used.
- MaxReqSize *int
- // MaxRespSize is the maximum allowed payload size for an individual response in a
- // stream (server->client) in bytes.
- MaxRespSize *int
- // RetryPolicy configures retry options for the method.
- RetryPolicy *RetryPolicy
-}
-
-// RetryPolicy defines the go-native version of the retry policy defined by the
-// service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type RetryPolicy struct {
- // MaxAttempts is the maximum number of attempts, including the original RPC.
- //
- // This field is required and must be two or greater.
- MaxAttempts int
-
- // Exponential backoff parameters. The initial retry attempt will occur at
- // random(0, initialBackoff). In general, the nth attempt will occur at
- // random(0,
- // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
- //
- // These fields are required and must be greater than zero.
- InitialBackoff time.Duration
- MaxBackoff time.Duration
- BackoffMultiplier float64
-
- // The set of status codes which may be retried.
- //
- // Status codes are specified as strings, e.g., "UNAVAILABLE".
- //
- // This field is required and must be non-empty.
- // Note: a set is used to store this for easy lookup.
- RetryableStatusCodes map[codes.Code]bool
-}
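
For reference, a hypothetical service-config JSON fragment of the kind that populates MethodConfig and RetryPolicy (field names per gRFC A6; the service name is invented):

    const sc = `{
      "methodConfig": [{
        "name": [{"service": "echo.Echo"}],
        "waitForReady": true,
        "timeout": "10s",
        "retryPolicy": {
          "maxAttempts": 4,
          "initialBackoff": "0.1s",
          "maxBackoff": "1s",
          "backoffMultiplier": 2.0,
          "retryableStatusCodes": ["UNAVAILABLE"]
        }
      }]
    }`
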
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
deleted file mode 100644
index fd33af51a..000000000
--- a/vendor/google.golang.org/grpc/internal/stats/labels.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package stats provides internal stats related functionality.
-package stats
-
-import "context"
-
-// Labels are the labels for metrics.
-type Labels struct {
- // TelemetryLabels are the telemetry labels to record.
- TelemetryLabels map[string]string
-}
-
-type labelsKey struct{}
-
-// GetLabels returns the Labels stored in the context, or nil if there is none.
-func GetLabels(ctx context.Context) *Labels {
- labels, _ := ctx.Value(labelsKey{}).(*Labels)
- return labels
-}
-
-// SetLabels sets the Labels in the context.
-func SetLabels(ctx context.Context, labels *Labels) context.Context {
- // could also append
- return context.WithValue(ctx, labelsKey{}, labels)
-}
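
A round-trip fragment for the two functions above (internal package, so illustration only; the label key is an arbitrary example):

    ctx := stats.SetLabels(context.Background(), &stats.Labels{
    	TelemetryLabels: map[string]string{"grpc.lb.locality": "us-east1-a"},
    })
    if l := stats.GetLabels(ctx); l != nil {
    	fmt.Println(l.TelemetryLabels["grpc.lb.locality"]) // us-east1-a
    }
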
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
deleted file mode 100644
index 79044657b..000000000
--- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package stats
-
-import (
- "fmt"
-
- estats "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/stats"
-)
-
-// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
-//
-// It panics if the number of label values provided in a Record call does not
-// match the number of label keys.
-type MetricsRecorderList struct {
- // metricsRecorders are the metrics recorders this list will forward to.
- metricsRecorders []estats.MetricsRecorder
-}
-
-// NewMetricsRecorderList creates a new metrics recorder list from the
-// provided stats handlers that implement the MetricsRecorder interface. If
-// none of the provided handlers implement it, the returned list is a no-op.
-func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
- var mrs []estats.MetricsRecorder
- for _, sh := range shs {
- if mr, ok := sh.(estats.MetricsRecorder); ok {
- mrs = append(mrs, mr)
- }
- }
- return &MetricsRecorderList{
- metricsRecorders: mrs,
- }
-}
-
-func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
- if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
- panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
- }
-}
-
-// RecordInt64Count records the measurement alongside labels on the int
-// count associated with the provided handle.
-func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
- verifyLabels(handle.Descriptor(), labels...)
-
- for _, metricRecorder := range l.metricsRecorders {
- metricRecorder.RecordInt64Count(handle, incr, labels...)
- }
-}
-
-// RecordFloat64Count records the measurement alongside labels on the float
-// count associated with the provided handle.
-func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
- verifyLabels(handle.Descriptor(), labels...)
-
- for _, metricRecorder := range l.metricsRecorders {
- metricRecorder.RecordFloat64Count(handle, incr, labels...)
- }
-}
-
-// RecordInt64Histo records the measurement alongside labels on the int
-// histo associated with the provided handle.
-func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
- verifyLabels(handle.Descriptor(), labels...)
-
- for _, metricRecorder := range l.metricsRecorders {
- metricRecorder.RecordInt64Histo(handle, incr, labels...)
- }
-}
-
-// RecordFloat64Histo records the measurement alongside labels on the float
-// histo associated with the provided handle.
-func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
- verifyLabels(handle.Descriptor(), labels...)
-
- for _, metricRecorder := range l.metricsRecorders {
- metricRecorder.RecordFloat64Histo(handle, incr, labels...)
- }
-}
-
-// RecordInt64Gauge records the measurement alongside labels on the int
-// gauge associated with the provided handle.
-func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
- verifyLabels(handle.Descriptor(), labels...)
-
- for _, metricRecorder := range l.metricsRecorders {
- metricRecorder.RecordInt64Gauge(handle, incr, labels...)
- }
-}
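The list above boils down to one filtering step plus a fan-out. A dependency-free sketch of the same pattern, with stand-in interfaces (Handler, Recorder) replacing stats.Handler and estats.MetricsRecorder:

package main

import "fmt"

// Recorder stands in for estats.MetricsRecorder in this sketch.
type Recorder interface {
	RecordInt64Count(name string, incr int64)
}

// Handler stands in for stats.Handler; only some handlers also record metrics.
type Handler interface {
	HandleRPC()
}

type recordingHandler struct{}

func (recordingHandler) HandleRPC() {}
func (recordingHandler) RecordInt64Count(name string, incr int64) {
	fmt.Println("record", name, incr)
}

type plainHandler struct{}

func (plainHandler) HandleRPC() {}

func main() {
	handlers := []Handler{recordingHandler{}, plainHandler{}}
	// Filtering step, as in NewMetricsRecorderList: keep only handlers that
	// also implement the recorder interface.
	var recorders []Recorder
	for _, h := range handlers {
		if r, ok := h.(Recorder); ok {
			recorders = append(recorders, r)
		}
	}
	// Fan out a single Record call to every recorder, as the list's
	// Record* methods do.
	for _, r := range recorders {
		r.RecordInt64Count("grpc.client.attempt.started", 1)
	}
}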
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
deleted file mode 100644
index 1186f1e9a..000000000
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package status implements errors returned by gRPC. These errors are
-// serialized and transmitted on the wire between server and client, and allow
-// for additional data to be transmitted via the Details field in the status
-// proto. gRPC service handlers should return an error created by this
-// package, and gRPC clients should expect a corresponding error to be
-// returned from the RPC call.
-//
-// This package upholds the invariants that a non-nil error may not
-// contain an OK code, and an OK code must result in a nil error.
-package status
-
-import (
- "errors"
- "fmt"
-
- spb "google.golang.org/genproto/googleapis/rpc/status"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/protoadapt"
- "google.golang.org/protobuf/types/known/anypb"
-)
-
-// Status represents an RPC status code, message, and details. It is immutable
-// and should be created with New, Newf, or FromProto.
-type Status struct {
- s *spb.Status
-}
-
-// NewWithProto returns a new status including details from statusProto. This
-// is meant to be used by the gRPC library only.
-func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
- if len(statusProto) != 1 {
-		// No grpc-status-details-bin header, or multiple; just ignore.
- return &Status{s: &spb.Status{Code: int32(code), Message: message}}
- }
- st := &spb.Status{}
- if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
- // Probably not a google.rpc.Status proto; do not provide details.
- return &Status{s: &spb.Status{Code: int32(code), Message: message}}
- }
- if st.Code == int32(code) {
- // The codes match between the grpc-status header and the
- // grpc-status-details-bin header; use the full details proto.
- return &Status{s: st}
- }
- return &Status{
- s: &spb.Status{
- Code: int32(codes.Internal),
- Message: fmt.Sprintf(
- "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
- code, message, st,
- ),
- },
- }
-}
-
-// New returns a Status representing c and msg.
-func New(c codes.Code, msg string) *Status {
- return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
-}
-
-// Newf returns New(c, fmt.Sprintf(format, a...)).
-func Newf(c codes.Code, format string, a ...any) *Status {
- return New(c, fmt.Sprintf(format, a...))
-}
-
-// FromProto returns a Status representing s.
-func FromProto(s *spb.Status) *Status {
- return &Status{s: proto.Clone(s).(*spb.Status)}
-}
-
-// Err returns an error representing c and msg. If c is OK, returns nil.
-func Err(c codes.Code, msg string) error {
- return New(c, msg).Err()
-}
-
-// Errorf returns Error(c, fmt.Sprintf(format, a...)).
-func Errorf(c codes.Code, format string, a ...any) error {
- return Err(c, fmt.Sprintf(format, a...))
-}
-
-// Code returns the status code contained in s.
-func (s *Status) Code() codes.Code {
- if s == nil || s.s == nil {
- return codes.OK
- }
- return codes.Code(s.s.Code)
-}
-
-// Message returns the message contained in s.
-func (s *Status) Message() string {
- if s == nil || s.s == nil {
- return ""
- }
- return s.s.Message
-}
-
-// Proto returns s's status as an spb.Status proto message.
-func (s *Status) Proto() *spb.Status {
- if s == nil {
- return nil
- }
- return proto.Clone(s.s).(*spb.Status)
-}
-
-// Err returns an immutable error representing s; returns nil if s.Code() is OK.
-func (s *Status) Err() error {
- if s.Code() == codes.OK {
- return nil
- }
- return &Error{s: s}
-}
-
-// WithDetails returns a new status with the provided details messages appended to the status.
-// If any errors are encountered, it returns nil and the first error encountered.
-func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
- if s.Code() == codes.OK {
- return nil, errors.New("no error details for status with code OK")
- }
- // s.Code() != OK implies that s.Proto() != nil.
- p := s.Proto()
- for _, detail := range details {
- m, err := anypb.New(protoadapt.MessageV2Of(detail))
- if err != nil {
- return nil, err
- }
- p.Details = append(p.Details, m)
- }
- return &Status{s: p}, nil
-}
-
-// Details returns a slice of details messages attached to the status.
-// If a detail cannot be decoded, the error is returned in place of the detail.
-// If the detail can be decoded, the proto message returned is of the same
-// type that was given to WithDetails().
-func (s *Status) Details() []any {
- if s == nil || s.s == nil {
- return nil
- }
- details := make([]any, 0, len(s.s.Details))
- for _, any := range s.s.Details {
- detail, err := any.UnmarshalNew()
- if err != nil {
- details = append(details, err)
- continue
- }
- // The call to MessageV1Of is required to unwrap the proto message if
- // it implemented only the MessageV1 API. The proto message would have
- // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
- // added to a global registry used by any.UnmarshalNew().
- // MessageV1Of has the following behaviour:
- // 1. If the given message is a wrapped MessageV1, it returns the
- // unwrapped value.
- // 2. If the given message already implements MessageV1, it returns it
- // as is.
- // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
- //
- // Since the Status.WithDetails() API only accepts MessageV1, calling
- // MessageV1Of ensures we return the same type that was given to
- // WithDetails:
-		//   * If the given type implemented only MessageV1, the unwrapping from
- // point 1 above will restore the type.
- // * If the given type implemented both MessageV1 and MessageV2, point 2
- // above will ensure no wrapping is performed.
- // * If the given type implemented only MessageV2 and was wrapped using
- // MessageV1Of before passing to WithDetails(), it would be unwrapped
- // in WithDetails by calling MessageV2Of(). Point 3 above will ensure
- // that the type is wrapped in a MessageV1 wrapper again before
- // returning. Note that protoc-gen-go doesn't generate code which
- // implements ONLY MessageV2 at the time of writing.
- //
- // NOTE: Status details can also be added using the FromProto method.
- // This could theoretically allow passing a Detail message that only
- // implements the V2 API. In such a case the message will be wrapped in
- // a MessageV1 wrapper when fetched using Details().
- // Since protoc-gen-go generates only code that implements both V1 and
- // V2 APIs for backward compatibility, this is not a concern.
- details = append(details, protoadapt.MessageV1Of(detail))
- }
- return details
-}
-
-func (s *Status) String() string {
- return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
-}
-
-// Error wraps a pointer to a Status. It implements the error interface, and a
-// nil *Error should never be returned by this package.
-type Error struct {
- s *Status
-}
-
-func (e *Error) Error() string {
- return e.s.String()
-}
-
-// GRPCStatus returns the Status represented by e.
-func (e *Error) GRPCStatus() *Status {
- return e.s
-}
-
-// Is implements errors.Is functionality.
-// Two Errors are equivalent if their underlying status protos are equal.
-func (e *Error) Is(target error) bool {
- tse, ok := target.(*Error)
- if !ok {
- return false
- }
- return proto.Equal(e.s.s, tse.s.s)
-}
-
-// IsRestrictedControlPlaneCode returns whether the status includes a code
-// restricted for control plane usage as defined by gRFC A54.
-func IsRestrictedControlPlaneCode(s *Status) bool {
- switch s.Code() {
- case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
- return true
- }
- return false
-}
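A minimal sketch of the invariants this package upholds; the package is internal to the grpc module, and the code and message are illustrative:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal/status"
)

func main() {
	st := status.New(codes.NotFound, "user not found")
	err := st.Err() // a non-OK code yields a non-nil error
	var se *status.Error
	if errors.As(err, &se) {
		fmt.Println(se.GRPCStatus().Code()) // NotFound
	}
	fmt.Println(status.Err(codes.OK, "") == nil)         // true: OK never produces an error
	fmt.Println(status.IsRestrictedControlPlaneCode(st)) // true: NotFound is restricted per gRFC A54
}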
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
deleted file mode 100644
index b3a72276d..000000000
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package syscall provides functionalities that grpc uses to get low-level operating system
-// stats/info.
-package syscall
-
-import (
- "fmt"
- "net"
- "syscall"
- "time"
-
- "golang.org/x/sys/unix"
- "google.golang.org/grpc/grpclog"
-)
-
-var logger = grpclog.Component("core")
-
-// GetCPUTime returns how much CPU time has passed since the start of this process.
-func GetCPUTime() int64 {
- var ts unix.Timespec
- if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
- logger.Fatal(err)
- }
- return ts.Nano()
-}
-
-// Rusage is an alias for syscall.Rusage under linux environment.
-type Rusage = syscall.Rusage
-
-// GetRusage returns the resource usage of the current process.
-func GetRusage() *Rusage {
- rusage := new(Rusage)
- syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
- return rusage
-}
-
-// CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
- var (
- utimeDiffs = latest.Utime.Sec - first.Utime.Sec
- utimeDiffus = latest.Utime.Usec - first.Utime.Usec
- stimeDiffs = latest.Stime.Sec - first.Stime.Sec
- stimeDiffus = latest.Stime.Usec - first.Stime.Usec
- )
-
- uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
- sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
-
- return uTimeElapsed, sTimeElapsed
-}
-
-// SetTCPUserTimeout sets the TCP user timeout on a connection's socket
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
- tcpconn, ok := conn.(*net.TCPConn)
- if !ok {
- // not a TCP connection. exit early
- return nil
- }
- rawConn, err := tcpconn.SyscallConn()
- if err != nil {
- return fmt.Errorf("error getting raw connection: %v", err)
- }
- err = rawConn.Control(func(fd uintptr) {
- err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
- })
- if err != nil {
- return fmt.Errorf("error setting option on socket: %v", err)
- }
-
- return nil
-}
-
-// GetTCPUserTimeout gets the TCP user timeout on a connection's socket
-func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
- tcpconn, ok := conn.(*net.TCPConn)
- if !ok {
- err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
- return
- }
- rawConn, err := tcpconn.SyscallConn()
- if err != nil {
- err = fmt.Errorf("error getting raw connection: %v", err)
- return
- }
- err = rawConn.Control(func(fd uintptr) {
- opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
- })
- if err != nil {
- err = fmt.Errorf("error getting option on socket: %v", err)
- return
- }
-
- return
-}
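A Linux-only usage sketch of the rusage helpers above (again grpc-module-internal; spin is a hypothetical CPU-bound workload):

//go:build linux

package main

import (
	"fmt"

	gsyscall "google.golang.org/grpc/internal/syscall"
)

// spin is a hypothetical CPU-bound workload to measure.
func spin() {
	for i := 0; i < 1e8; i++ {
	}
}

func main() {
	before := gsyscall.GetRusage()
	spin()
	after := gsyscall.GetRusage()
	utime, stime := gsyscall.CPUTimeDiff(before, after)
	fmt.Printf("user=%.3fs system=%.3fs\n", utime, stime)
}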
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
deleted file mode 100644
index 54c24c2ff..000000000
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ /dev/null
@@ -1,77 +0,0 @@
-//go:build !linux
-// +build !linux
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package syscall provides functionalities that grpc uses to get low-level
-// operating system stats/info.
-package syscall
-
-import (
- "net"
- "sync"
- "time"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var once sync.Once
-var logger = grpclog.Component("core")
-
-func log() {
- once.Do(func() {
- logger.Info("CPU time info is unavailable on non-linux environments.")
- })
-}
-
-// GetCPUTime returns how much CPU time has passed since the start of this
-// process. It always returns 0 under non-linux environments.
-func GetCPUTime() int64 {
- log()
- return 0
-}
-
-// Rusage is an empty struct under non-linux environments.
-type Rusage struct{}
-
-// GetRusage is a no-op function under non-linux environments.
-func GetRusage() *Rusage {
- log()
- return nil
-}
-
-// CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It is a no-op function under non-linux environments.
-func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
- log()
- return 0, 0
-}
-
-// SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(net.Conn, time.Duration) error {
- log()
- return nil
-}
-
-// GetTCPUserTimeout is a no-op function under non-linux environments.
-// A negative return value indicates the operation is not supported.
-func GetTCPUserTimeout(net.Conn) (int, error) {
- log()
- return -1, nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
deleted file mode 100644
index 4f347edd4..000000000
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//go:build !unix && !windows
-
-/*
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
- "net"
-)
-
-// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on platforms other than unix and windows.
-func NetDialerWithTCPKeepalive() *net.Dialer {
- return &net.Dialer{}
-}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
deleted file mode 100644
index 7e7aaa546..000000000
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
+++ /dev/null
@@ -1,54 +0,0 @@
-//go:build unix
-
-/*
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
- "net"
- "syscall"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
-// the underlying connection with OS default values for keepalive parameters.
-//
-// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
-// appropriate Go version becomes less than our least supported Go version, we
-// should look into using the new API to make things more straightforward.
-func NetDialerWithTCPKeepalive() *net.Dialer {
- return &net.Dialer{
- // Setting a negative value here prevents the Go stdlib from overriding
- // the values of TCP keepalive time and interval. It also prevents the
- // Go stdlib from enabling TCP keepalives by default.
- KeepAlive: time.Duration(-1),
- // This method is called after the underlying network socket is created,
- // but before dialing the socket (or calling its connect() method). The
- // combination of unconditionally enabling TCP keepalives here, and
- // disabling the overriding of TCP keepalive parameters by setting the
- // KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keepalive interval and time parameters.
- Control: func(_, _ string, c syscall.RawConn) error {
- return c.Control(func(fd uintptr) {
- unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
- })
- },
- }
-}
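A usage sketch: because the Control hook runs after socket creation but before connect(2), the keepalive option applies for the connection's whole lifetime. The address is illustrative, and the package is internal to the grpc module:

package main

import (
	"log"

	"google.golang.org/grpc/internal"
)

func main() {
	d := internal.NetDialerWithTCPKeepalive()
	conn, err := d.Dial("tcp", "example.com:443") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// The socket now has SO_KEEPALIVE enabled with OS-default timing.
	log.Println("connected:", conn.RemoteAddr())
}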
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
deleted file mode 100644
index d5c1085ee..000000000
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
+++ /dev/null
@@ -1,54 +0,0 @@
-//go:build windows
-
-/*
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
- "net"
- "syscall"
- "time"
-
- "golang.org/x/sys/windows"
-)
-
-// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
-// the underlying connection with OS default values for keepalive parameters.
-//
-// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
-// appropriate Go version becomes less than our least supported Go version, we
-// should look into using the new API to make things more straightforward.
-func NetDialerWithTCPKeepalive() *net.Dialer {
- return &net.Dialer{
- // Setting a negative value here prevents the Go stdlib from overriding
- // the values of TCP keepalive time and interval. It also prevents the
- // Go stdlib from enabling TCP keepalives by default.
- KeepAlive: time.Duration(-1),
- // This method is called after the underlying network socket is created,
- // but before dialing the socket (or calling its connect() method). The
- // combination of unconditionally enabling TCP keepalives here, and
- // disabling the overriding of TCP keepalive parameters by setting the
- // KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keepalive interval and time parameters.
- Control: func(_, _ string, c syscall.RawConn) error {
- return c.Control(func(fd uintptr) {
- windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
- })
- },
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
deleted file mode 100644
index 070680edb..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "sync"
- "time"
-)
-
-const (
- // bdpLimit is the maximum value the flow control windows will be increased
- // to. TCP typically limits this to 4MB, but some systems go up to 16MB.
- // Since this is only a limit, it is safe to make it optimistic.
- bdpLimit = (1 << 20) * 16
- // alpha is a constant factor used to keep a moving average
- // of RTTs.
- alpha = 0.9
- // If the current bdp sample is greater than or equal to
- // our beta * our estimated bdp and the current bandwidth
- // sample is the maximum bandwidth observed so far, we
-	// increase our bdp estimate by a factor of gamma.
- beta = 0.66
-	// To keep our bdp estimate smaller than or equal to twice the real BDP,
-	// we should multiply the current sample by 4/3; to round things out,
-	// we use 2 as the multiplication factor instead.
- gamma = 2
-)
-
-// Adding arbitrary data to ping so that its ack can be identified.
-// Easter-egg: what does the ping message say?
-var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
-
-type bdpEstimator struct {
- // sentAt is the time when the ping was sent.
- sentAt time.Time
-
- mu sync.Mutex
- // bdp is the current bdp estimate.
- bdp uint32
- // sample is the number of bytes received in one measurement cycle.
- sample uint32
- // bwMax is the maximum bandwidth noted so far (bytes/sec).
- bwMax float64
-	// isSent is true while a bdp ping is in flight, i.e. a measurement cycle is in progress.
- isSent bool
- // Callback to update the window sizes.
- updateFlowControl func(n uint32)
- // sampleCount is the number of samples taken so far.
- sampleCount uint64
- // round trip time (seconds)
- rtt float64
-}
-
-// timesnap registers the time bdp ping was sent out so that
-// network rtt can be calculated when its ack is received.
-// It is called (by controller) when the bdpPing is
-// being written on the wire.
-func (b *bdpEstimator) timesnap(d [8]byte) {
- if bdpPing.data != d {
- return
- }
- b.sentAt = time.Now()
-}
-
-// add adds bytes to the current sample for calculating bdp.
-// It returns true only if a ping must be sent. This can be used
-// by the caller (handleData) to make a decision about batching
-// a window update with it.
-func (b *bdpEstimator) add(n uint32) bool {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.bdp == bdpLimit {
- return false
- }
- if !b.isSent {
- b.isSent = true
- b.sample = n
- b.sentAt = time.Time{}
- b.sampleCount++
- return true
- }
- b.sample += n
- return false
-}
-
-// calculate is called when an ack for a bdp ping is received.
-// Here we calculate the current bdp and bandwidth sample and
-// decide if the flow control windows should go up.
-func (b *bdpEstimator) calculate(d [8]byte) {
- // Check if the ping acked for was the bdp ping.
- if bdpPing.data != d {
- return
- }
- b.mu.Lock()
- rttSample := time.Since(b.sentAt).Seconds()
- if b.sampleCount < 10 {
- // Bootstrap rtt with an average of first 10 rtt samples.
- b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
- } else {
-		// Weight the recent past more heavily.
- b.rtt += (rttSample - b.rtt) * float64(alpha)
- }
- b.isSent = false
- // The number of bytes accumulated so far in the sample is smaller
- // than or equal to 1.5 times the real BDP on a saturated connection.
- bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
- if bwCurrent > b.bwMax {
- b.bwMax = bwCurrent
- }
- // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is
- // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we
- // should update our perception of the network BDP.
- if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
- sampleFloat := float64(b.sample)
- b.bdp = uint32(gamma * sampleFloat)
- if b.bdp > bdpLimit {
- b.bdp = bdpLimit
- }
- bdp := b.bdp
- b.mu.Unlock()
- b.updateFlowControl(bdp)
- return
- }
- b.mu.Unlock()
-}
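To make alpha, beta, and gamma concrete, a self-contained sketch of the two update rules with made-up sample values:

package main

import "fmt"

func main() {
	const alpha, beta, gamma = 0.9, 0.66, 2.0

	// EWMA bootstrap: the first 10 samples are averaged; later samples use
	// rtt += (sample - rtt) * alpha, weighting the recent past heavily.
	rtt := 0.0
	samples := []float64{0.050, 0.040, 0.060} // seconds, illustrative
	for i, s := range samples {
		if i < 10 {
			rtt += (s - rtt) / float64(i+1)
		} else {
			rtt += (s - rtt) * alpha
		}
	}
	fmt.Printf("rtt estimate: %.3fs\n", rtt) // 0.050 for the samples above

	// Window growth: if a sample reaches beta (2/3) of the current bdp while
	// bandwidth is at its observed maximum, the estimate is multiplied by
	// gamma (2), capped at bdpLimit in the real code.
	bdp, sample := 65536.0, 48000.0
	if sample >= beta*bdp {
		bdp = gamma * sample
	}
	fmt.Printf("new bdp: %.0f bytes\n", bdp) // 96000
}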
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
deleted file mode 100644
index 8ed347c54..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "sync/atomic"
-
- "golang.org/x/net/http2"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// ClientStream implements streaming functionality for a gRPC client.
-type ClientStream struct {
- *Stream // Embed for common stream functionality.
-
- ct *http2Client
- done chan struct{} // closed at the end of stream to unblock writers.
- doneFunc func() // invoked at the end of stream.
-
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
- // headerValid indicates whether a valid header was received. Only
- // meaningful after headerChan is closed (always call waitOnHeader() before
- // reading its value).
- headerValid bool
- header metadata.MD // the received header metadata
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
- unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
-
- status *status.Status // the status error received from the server
-}
-
-// Read reads an n-byte message from the input stream.
-func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
- b, err := s.Stream.read(n)
- if err == nil {
- s.ct.incrMsgRecv()
- }
- return b, err
-}
-
-// Close closes the stream and propagates err to any readers.
-func (s *ClientStream) Close(err error) {
- var (
- rst bool
- rstCode http2.ErrCode
- )
- if err != nil {
- rst = true
- rstCode = http2.ErrCodeCancel
- }
- s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-// Write writes the hdr and data bytes to the output stream.
-func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
- return s.ct.write(s, hdr, data, opts)
-}
-
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *ClientStream) BytesReceived() bool {
- return s.bytesReceived.Load()
-}
-
-// Unprocessed indicates whether the server did not process this stream --
-// i.e. it sent a refused stream or GOAWAY including this stream ID.
-func (s *ClientStream) Unprocessed() bool {
- return s.unprocessed.Load()
-}
-
-func (s *ClientStream) waitOnHeader() {
- select {
- case <-s.ctx.Done():
- // Close the stream to prevent headers/trailers from changing after
- // this function returns.
- s.Close(ContextErr(s.ctx.Err()))
- // headerChan could possibly not be closed yet if closeStream raced
- // with operateHeaders; wait until it is closed explicitly here.
- <-s.headerChan
- case <-s.headerChan:
- }
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is empty string if there is no compression applied.
-func (s *ClientStream) RecvCompress() string {
- s.waitOnHeader()
- return s.recvCompress
-}
-
-// Done returns a channel which is closed when it receives the final status
-// from the server.
-func (s *ClientStream) Done() <-chan struct{} {
- return s.done
-}
-
-// Header returns the header metadata of the stream. Acquires the key-value
-// pairs of header metadata once it is available. It blocks until i) the
-// metadata is ready or ii) there is no header metadata or iii) the stream is
-// canceled/expired.
-func (s *ClientStream) Header() (metadata.MD, error) {
- s.waitOnHeader()
-
- if !s.headerValid || s.noHeaders {
- return nil, s.status.Err()
- }
-
- return s.header.Copy(), nil
-}
-
-// TrailersOnly blocks until a header or trailers-only frame is received and
-// then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, it returns true.
-func (s *ClientStream) TrailersOnly() bool {
- s.waitOnHeader()
- return s.noHeaders
-}
-
-// Status returns the status received from the server.
-// Status can be read safely only after the stream has ended,
-// that is, after Done() is closed.
-func (s *ClientStream) Status() *status.Status {
- return s.status
-}
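A sketch of the accessor ordering these methods support; transport setup is elided and the consume function is hypothetical (grpc-module-internal package):

package main

import (
	"log"

	"google.golang.org/grpc/internal/transport"
)

// consume shows the intended call order on a stream obtained from a transport.
func consume(cs *transport.ClientStream) {
	md, err := cs.Header() // blocks until headers arrive, the stream ends, or it is canceled
	if err != nil {
		log.Printf("no headers: %v", err)
		return
	}
	log.Printf("header metadata: %v", md)
	<-cs.Done() // closed once the final status has been received
	if err := cs.Status().Err(); err != nil {
		log.Printf("RPC failed: %v", err)
	}
}

func main() {} // constructing a real transport is out of scope for this sketch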
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
deleted file mode 100644
index ef72fbb3a..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ /dev/null
@@ -1,1035 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bytes"
- "errors"
- "fmt"
- "net"
- "runtime"
- "strconv"
- "sync"
- "sync/atomic"
-
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/grpcutil"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/status"
-)
-
-var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
- e.SetMaxDynamicTableSizeLimit(v)
-}
-
-type itemNode struct {
- it any
- next *itemNode
-}
-
-type itemList struct {
- head *itemNode
- tail *itemNode
-}
-
-func (il *itemList) enqueue(i any) {
- n := &itemNode{it: i}
- if il.tail == nil {
- il.head, il.tail = n, n
- return
- }
- il.tail.next = n
- il.tail = n
-}
-
-// peek returns the first item in the list without removing it from the
-// list.
-func (il *itemList) peek() any {
- return il.head.it
-}
-
-func (il *itemList) dequeue() any {
- if il.head == nil {
- return nil
- }
- i := il.head.it
- il.head = il.head.next
- if il.head == nil {
- il.tail = nil
- }
- return i
-}
-
-func (il *itemList) dequeueAll() *itemNode {
- h := il.head
- il.head, il.tail = nil, nil
- return h
-}
-
-func (il *itemList) isEmpty() bool {
- return il.head == nil
-}
-
-// The following defines various control items which could flow through
-// the control buffer of the transport. They represent different aspects of
-// control tasks, e.g., flow control, settings, stream resetting, etc.
-
-// maxQueuedTransportResponseFrames is the most queued "transport response"
-// frames we will buffer before preventing new reads from occurring on the
-// transport. These are control frames sent in response to client requests,
-// such as RST_STREAM due to bad headers or settings acks.
-const maxQueuedTransportResponseFrames = 50
-
-type cbItem interface {
- isTransportResponseFrame() bool
-}
-
-// registerStream is used to register an incoming stream with loopy writer.
-type registerStream struct {
- streamID uint32
- wq *writeQuota
-}
-
-func (*registerStream) isTransportResponseFrame() bool { return false }
-
-// headerFrame is also used to register stream on the client-side.
-type headerFrame struct {
- streamID uint32
- hf []hpack.HeaderField
- endStream bool // Valid on server side.
- initStream func(uint32) error // Used only on the client side.
- onWrite func()
- wq *writeQuota // write quota for the stream created.
- cleanup *cleanupStream // Valid on the server side.
- onOrphaned func(error) // Valid on client-side
-}
-
-func (h *headerFrame) isTransportResponseFrame() bool {
- return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
-}
-
-type cleanupStream struct {
- streamID uint32
- rst bool
- rstCode http2.ErrCode
- onWrite func()
-}
-
-func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
-
-type earlyAbortStream struct {
- httpStatus uint32
- streamID uint32
- contentSubtype string
- status *status.Status
- rst bool
-}
-
-func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
-
-type dataFrame struct {
- streamID uint32
- endStream bool
- h []byte
- reader mem.Reader
- // onEachWrite is called every time
- // a part of data is written out.
- onEachWrite func()
-}
-
-func (*dataFrame) isTransportResponseFrame() bool { return false }
-
-type incomingWindowUpdate struct {
- streamID uint32
- increment uint32
-}
-
-func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
-
-type outgoingWindowUpdate struct {
- streamID uint32
- increment uint32
-}
-
-func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
- return false // window updates are throttled by thresholds
-}
-
-type incomingSettings struct {
- ss []http2.Setting
-}
-
-func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
-
-type outgoingSettings struct {
- ss []http2.Setting
-}
-
-func (*outgoingSettings) isTransportResponseFrame() bool { return false }
-
-type incomingGoAway struct {
-}
-
-func (*incomingGoAway) isTransportResponseFrame() bool { return false }
-
-type goAway struct {
- code http2.ErrCode
- debugData []byte
- headsUp bool
- closeConn error // if set, loopyWriter will exit with this error
-}
-
-func (*goAway) isTransportResponseFrame() bool { return false }
-
-type ping struct {
- ack bool
- data [8]byte
-}
-
-func (*ping) isTransportResponseFrame() bool { return true }
-
-type outFlowControlSizeRequest struct {
- resp chan uint32
-}
-
-func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
-
-// closeConnection is an instruction to tell the loopy writer to flush the
-// framer and exit, which will cause the transport's connection to be closed
-// (by the client or server). The transport itself will close after the reader
-// encounters the EOF caused by the connection closure.
-type closeConnection struct{}
-
-func (closeConnection) isTransportResponseFrame() bool { return false }
-
-type outStreamState int
-
-const (
- active outStreamState = iota
- empty
- waitingOnStreamQuota
-)
-
-type outStream struct {
- id uint32
- state outStreamState
- itl *itemList
- bytesOutStanding int
- wq *writeQuota
-
- next *outStream
- prev *outStream
-}
-
-func (s *outStream) deleteSelf() {
- if s.prev != nil {
- s.prev.next = s.next
- }
- if s.next != nil {
- s.next.prev = s.prev
- }
- s.next, s.prev = nil, nil
-}
-
-type outStreamList struct {
- // Following are sentinel objects that mark the
- // beginning and end of the list. They do not
- // contain any item lists. All valid objects are
- // inserted in between them.
- // This is needed so that an outStream object can
- // deleteSelf() in O(1) time without knowing which
- // list it belongs to.
- head *outStream
- tail *outStream
-}
-
-func newOutStreamList() *outStreamList {
- head, tail := new(outStream), new(outStream)
- head.next = tail
- tail.prev = head
- return &outStreamList{
- head: head,
- tail: tail,
- }
-}
-
-func (l *outStreamList) enqueue(s *outStream) {
- e := l.tail.prev
- e.next = s
- s.prev = e
- s.next = l.tail
- l.tail.prev = s
-}
-
-// remove from the beginning of the list.
-func (l *outStreamList) dequeue() *outStream {
- b := l.head.next
- if b == l.tail {
- return nil
- }
- b.deleteSelf()
- return b
-}
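The sentinel head/tail nodes are what let deleteSelf run in O(1) with no reference to the owning list. The same structure, reduced to a self-contained sketch:

package main

import "fmt"

type node struct {
	val        int
	next, prev *node
}

// deleteSelf unlinks n in O(1); the sentinels guarantee next and prev are
// non-nil for every real element, so no nil checks or list handle are needed.
func (n *node) deleteSelf() {
	n.prev.next = n.next
	n.next.prev = n.prev
	n.next, n.prev = nil, nil
}

func main() {
	head, tail := new(node), new(node) // sentinels; they never hold values
	head.next, tail.prev = tail, head
	for v := 1; v <= 3; v++ { // enqueue 1, 2, 3 before the tail sentinel
		n := &node{val: v, prev: tail.prev, next: tail}
		tail.prev.next = n
		tail.prev = n
	}
	head.next.next.deleteSelf() // remove 2 without touching the list header
	for n := head.next; n != tail; n = n.next {
		fmt.Println(n.val) // prints 1, then 3
	}
}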
-
-// controlBuffer is a way to pass information to loopy.
-//
-// Information is passed as specific struct types called control frames. A
-// control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state. It
-// shouldn't be confused with an HTTP2 frame, although some of the control
-// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
-type controlBuffer struct {
- wakeupCh chan struct{} // Unblocks readers waiting for something to read.
- done <-chan struct{} // Closed when the transport is done.
-
- // Mutex guards all the fields below, except trfChan which can be read
- // atomically without holding mu.
- mu sync.Mutex
- consumerWaiting bool // True when readers are blocked waiting for new data.
- closed bool // True when the controlbuf is finished.
- list *itemList // List of queued control frames.
-
- // transportResponseFrames counts the number of queued items that represent
- // the response of an action initiated by the peer. trfChan is created
- // when transportResponseFrames >= maxQueuedTransportResponseFrames and is
- // closed and nilled when transportResponseFrames drops below the
- // threshold. Both fields are protected by mu.
- transportResponseFrames int
- trfChan atomic.Pointer[chan struct{}]
-}
-
-func newControlBuffer(done <-chan struct{}) *controlBuffer {
- return &controlBuffer{
- wakeupCh: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
- }
-}
-
-// throttle blocks if there are too many frames in the control buf that
-// represent the response of an action initiated by the peer, like
-// incomingSettings, cleanupStreams, etc.
-func (c *controlBuffer) throttle() {
- if ch := c.trfChan.Load(); ch != nil {
- select {
- case <-(*ch):
- case <-c.done:
- }
- }
-}
-
-// put adds an item to the controlbuf.
-func (c *controlBuffer) put(it cbItem) error {
- _, err := c.executeAndPut(nil, it)
- return err
-}
-
-// executeAndPut runs f, and if the return value is true, adds the given item to
-// the controlbuf. The item could be nil, in which case, this method simply
-// executes f and does not add the item to the controlbuf.
-//
-// The first return value indicates whether the item was successfully added to
-// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
-// if the control buffer is already closed.
-func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return false, ErrConnClosing
- }
- if f != nil {
- if !f() { // f wasn't successful
- return false, nil
- }
- }
- if it == nil {
- return true, nil
- }
-
- var wakeUp bool
- if c.consumerWaiting {
- wakeUp = true
- c.consumerWaiting = false
- }
- c.list.enqueue(it)
- if it.isTransportResponseFrame() {
- c.transportResponseFrames++
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are adding the frame that puts us over the threshold; create
- // a throttling channel.
- ch := make(chan struct{})
- c.trfChan.Store(&ch)
- }
- }
- if wakeUp {
- select {
- case c.wakeupCh <- struct{}{}:
- default:
- }
- }
- return true, nil
-}
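The threshold bookkeeping in executeAndPut and getOnceLocked, reduced to a single-goroutine sketch (the real code additionally holds mu; maxPending stands in for maxQueuedTransportResponseFrames):

package main

import (
	"fmt"
	"sync/atomic"
)

const maxPending = 3 // stand-in for maxQueuedTransportResponseFrames

type throttler struct {
	pending int
	gate    atomic.Pointer[chan struct{}]
}

// enqueue mirrors executeAndPut: crossing the threshold installs a gate
// channel that throttle() would block on.
func (t *throttler) enqueue() {
	t.pending++
	if t.pending == maxPending {
		ch := make(chan struct{})
		t.gate.Store(&ch)
	}
}

// dequeue mirrors getOnceLocked: dropping below the threshold closes and
// clears the gate, releasing any blocked readers.
func (t *throttler) dequeue() {
	if t.pending == maxPending {
		close(*t.gate.Swap(nil))
	}
	t.pending--
}

func main() {
	var t throttler
	for i := 0; i < maxPending; i++ {
		t.enqueue()
	}
	fmt.Println("reads throttled:", t.gate.Load() != nil) // true
	t.dequeue()
	fmt.Println("gate cleared:", t.gate.Load() == nil) // true
}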
-
-// get returns the next control frame from the control buffer. If block is true
-// **and** there are no control frames in the control buffer, the call blocks
-// until one of the conditions is met: there is a frame to return or the
-// transport is closed.
-func (c *controlBuffer) get(block bool) (any, error) {
- for {
- c.mu.Lock()
- frame, err := c.getOnceLocked()
- if frame != nil || err != nil || !block {
- // If we read a frame or an error, we can return to the caller. The
- // call to getOnceLocked() returns a nil frame and a nil error if
- // there is nothing to read, and in that case, if the caller asked
- // us not to block, we can return now as well.
- c.mu.Unlock()
- return frame, err
- }
- c.consumerWaiting = true
- c.mu.Unlock()
-
- // Release the lock above and wait to be woken up.
- select {
- case <-c.wakeupCh:
- case <-c.done:
- return nil, errors.New("transport closed by client")
- }
- }
-}
-
-// Callers must not use this method, but should instead use get().
-//
-// Caller must hold c.mu.
-func (c *controlBuffer) getOnceLocked() (any, error) {
- if c.closed {
-		return nil, ErrConnClosing
- }
- if c.list.isEmpty() {
- return nil, nil
- }
- h := c.list.dequeue().(cbItem)
- if h.isTransportResponseFrame() {
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are removing the frame that put us over the
- // threshold; close and clear the throttling channel.
- ch := c.trfChan.Swap(nil)
- close(*ch)
- }
- c.transportResponseFrames--
- }
- return h, nil
-}
-
-// finish closes the control buffer, cleaning up any streams that have queued
-// header frames. Once this method returns, no more frames can be added to the
-// control buffer, and attempts to do so will return ErrConnClosing.
-func (c *controlBuffer) finish() {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return
- }
- c.closed = true
- // There may be headers for streams in the control buffer.
- // These streams need to be cleaned out since the transport
- // is still not aware of these yet.
- for head := c.list.dequeueAll(); head != nil; head = head.next {
- switch v := head.it.(type) {
- case *headerFrame:
- if v.onOrphaned != nil { // It will be nil on the server-side.
- v.onOrphaned(ErrConnClosing)
- }
- case *dataFrame:
- _ = v.reader.Close()
- }
- }
-
- // In case throttle() is currently in flight, it needs to be unblocked.
- // Otherwise, the transport may not close, since the transport is closed by
- // the reader encountering the connection error.
- ch := c.trfChan.Swap(nil)
- if ch != nil {
- close(*ch)
- }
-}
-
-type side int
-
-const (
- clientSide side = iota
- serverSide
-)
-
-// Loopy receives frames from the control buffer.
-// Each frame is handled individually; most of the work done by loopy goes
-// into handling data frames. Loopy maintains a queue of active streams, and each
-// stream maintains a queue of data frames; as loopy receives data frames,
-// they are added to the queue of the relevant stream.
-// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resembling a round-robin scheduling over all streams. While
-// processing a stream, loopy writes out data bytes from this stream capped by the min
-// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
-type loopyWriter struct {
- side side
- cbuf *controlBuffer
- sendQuota uint32
- oiws uint32 // outbound initial window size.
- // estdStreams is map of all established streams that are not cleaned-up yet.
- // On client-side, this is all streams whose headers were sent out.
- // On server-side, this is all streams whose headers were received.
- estdStreams map[uint32]*outStream // Established streams.
- // activeStreams is a linked-list of all streams that have data to send and some
- // stream-level flow control quota.
- // Each of these streams internally have a list of data items(and perhaps trailers
- // on the server-side) to be sent out.
- activeStreams *outStreamList
- framer *framer
- hBuf *bytes.Buffer // The buffer for HPACK encoding.
- hEnc *hpack.Encoder // HPACK encoder.
- bdpEst *bdpEstimator
- draining bool
- conn net.Conn
- logger *grpclog.PrefixLogger
- bufferPool mem.BufferPool
-
- // Side-specific handlers
- ssGoAwayHandler func(*goAway) (bool, error)
-}
-
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
- var buf bytes.Buffer
- l := &loopyWriter{
- side: s,
- cbuf: cbuf,
- sendQuota: defaultWindowSize,
- oiws: defaultWindowSize,
- estdStreams: make(map[uint32]*outStream),
- activeStreams: newOutStreamList(),
- framer: fr,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- bdpEst: bdpEst,
- conn: conn,
- logger: logger,
- ssGoAwayHandler: goAwayHandler,
- bufferPool: bufferPool,
- }
- return l
-}
-
-const minBatchSize = 1000
-
-// run should be run in a separate goroutine.
-// It reads control frames from controlBuf and processes them by:
-// 1. Updating loopy's internal state, or/and
-// 2. Writing out HTTP2 frames on the wire.
-//
-// Loopy keeps all active streams with data to send in a linked-list.
-// All streams in the activeStreams linked-list must have both:
-// 1. Data to send, and
-// 2. Stream level flow control quota available.
-//
-// In each iteration of the run loop, besides processing the incoming control
-// frame, loopy calls processData, which processes one node from the
-// activeStreams linked-list. This results in writing of HTTP2 frames into an
-// underlying write buffer. When there are no more control frames to read from
-// controlBuf, loopy flushes the write buffer. As an optimization to increase
-// the batch size of each flush, loopy yields the processor once if the batch
-// size is too low, giving stream goroutines a chance to fill it up.
-//
-// Upon exiting, if the error causing the exit is not an I/O error, run()
-// flushes the underlying connection. The connection is always left open to
-// allow different closing behavior on the client and server.
-func (l *loopyWriter) run() (err error) {
- defer func() {
- if l.logger.V(logLevel) {
- l.logger.Infof("loopyWriter exiting with error: %v", err)
- }
- if !isIOError(err) {
- l.framer.writer.Flush()
- }
- l.cbuf.finish()
- }()
- for {
- it, err := l.cbuf.get(true)
- if err != nil {
- return err
- }
- if err = l.handle(it); err != nil {
- return err
- }
- if _, err = l.processData(); err != nil {
- return err
- }
- gosched := true
- hasdata:
- for {
- it, err := l.cbuf.get(false)
- if err != nil {
- return err
- }
- if it != nil {
- if err = l.handle(it); err != nil {
- return err
- }
- if _, err = l.processData(); err != nil {
- return err
- }
- continue hasdata
- }
- isEmpty, err := l.processData()
- if err != nil {
- return err
- }
- if !isEmpty {
- continue hasdata
- }
- if gosched {
- gosched = false
- if l.framer.writer.offset < minBatchSize {
- runtime.Gosched()
- continue hasdata
- }
- }
- l.framer.writer.Flush()
- break hasdata
- }
- }
-}
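The batching strategy in run(), reduced to a runnable single-goroutine sketch; the channel stands in for the control buffer and the byte counts are illustrative:

package main

import (
	"fmt"
	"runtime"
)

const minBatchSize = 1000 // same threshold as above

func main() {
	frames := make(chan int, 16) // stand-in for the control buffer
	for i := 0; i < 5; i++ {
		frames <- 100 // five 100-byte frames queued
	}

	buffered := 0
	yielded := false
	for {
		select {
		case n := <-frames: // non-blocking drain, like cbuf.get(false)
			buffered += n
			continue
		default:
		}
		if !yielded && buffered < minBatchSize {
			yielded = true
			runtime.Gosched() // yield once so writers can top up the batch
			continue
		}
		fmt.Println("flushing", buffered, "bytes") // flushing 500 bytes
		return
	}
}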
-
-func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
- return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
-}
-
-func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
-	// A window update on stream 0 adjusts the connection-level send quota.
- if w.streamID == 0 {
- l.sendQuota += w.increment
- return
- }
- // Find the stream and update it.
- if str, ok := l.estdStreams[w.streamID]; ok {
- str.bytesOutStanding -= int(w.increment)
- if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
- str.state = active
- l.activeStreams.enqueue(str)
- return
- }
- }
-}
-
-func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
- return l.framer.fr.WriteSettings(s.ss...)
-}
-
-func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
- l.applySettings(s.ss)
- return l.framer.fr.WriteSettingsAck()
-}
-
-func (l *loopyWriter) registerStreamHandler(h *registerStream) {
- str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- }
- l.estdStreams[h.streamID] = str
-}
-
-func (l *loopyWriter) headerHandler(h *headerFrame) error {
- if l.side == serverSide {
- str, ok := l.estdStreams[h.streamID]
- if !ok {
- if l.logger.V(logLevel) {
- l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
- }
- return nil
- }
- // Case 1.A: Server is responding back with headers.
- if !h.endStream {
- return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
- }
- // else: Case 1.B: Server wants to close stream.
-
- if str.state != empty { // either active or waiting on stream quota.
-			// add it to str's list of items.
- str.itl.enqueue(h)
- return nil
- }
- if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
- return err
- }
- return l.cleanupStreamHandler(h.cleanup)
- }
- // Case 2: Client wants to originate stream.
- str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- }
- return l.originateStream(str, h)
-}
-
-func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
- // l.draining is set when handling GoAway. In which case, we want to avoid
- // creating new streams.
- if l.draining {
- // TODO: provide a better error with the reason we are in draining.
- hdr.onOrphaned(errStreamDrain)
- return nil
- }
- if err := hdr.initStream(str.id); err != nil {
- return err
- }
- if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
- return err
- }
- l.estdStreams[str.id] = str
- return nil
-}
-
-func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
- if onWrite != nil {
- onWrite()
- }
- l.hBuf.Reset()
- for _, f := range hf {
- if err := l.hEnc.WriteField(f); err != nil {
- if l.logger.V(logLevel) {
- l.logger.Warningf("Encountered error while encoding headers: %v", err)
- }
- }
- }
- var (
- err error
- endHeaders, first bool
- )
- first = true
- for !endHeaders {
- size := l.hBuf.Len()
- if size > http2MaxFrameLen {
- size = http2MaxFrameLen
- } else {
- endHeaders = true
- }
- if first {
- first = false
- err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
- StreamID: streamID,
- BlockFragment: l.hBuf.Next(size),
- EndStream: endStream,
- EndHeaders: endHeaders,
- })
- } else {
- err = l.framer.fr.WriteContinuation(
- streamID,
- endHeaders,
- l.hBuf.Next(size),
- )
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (l *loopyWriter) preprocessData(df *dataFrame) {
- str, ok := l.estdStreams[df.streamID]
- if !ok {
- return
- }
- // If we got data for a stream it means that
- // stream was originated and the headers were sent out.
- str.itl.enqueue(df)
- if str.state == empty {
- str.state = active
- l.activeStreams.enqueue(str)
- }
-}
-
-func (l *loopyWriter) pingHandler(p *ping) error {
- if !p.ack {
- l.bdpEst.timesnap(p.data)
- }
- return l.framer.fr.WritePing(p.ack, p.data)
-}
-
-func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
- o.resp <- l.sendQuota
-}
-
-func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
- c.onWrite()
- if str, ok := l.estdStreams[c.streamID]; ok {
-		// On the server side it could be a trailers-only response or
-		// a RST_STREAM before stream initialization, so the stream might
-		// not be established yet.
- delete(l.estdStreams, c.streamID)
- str.deleteSelf()
- for head := str.itl.dequeueAll(); head != nil; head = head.next {
- if df, ok := head.it.(*dataFrame); ok {
- _ = df.reader.Close()
- }
- }
- }
- if c.rst { // If RST_STREAM needs to be sent.
- if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
- return err
- }
- }
- if l.draining && len(l.estdStreams) == 0 {
- // Flush and close the connection; we are done with it.
- return errors.New("finished processing active streams while in draining mode")
- }
- return nil
-}
-
-func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
- if l.side == clientSide {
- return errors.New("earlyAbortStream not handled on client")
- }
- // In case the caller forgets to set the http status, default to 200.
- if eas.httpStatus == 0 {
- eas.httpStatus = 200
- }
- headerFields := []hpack.HeaderField{
- {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
- {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
- {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
- {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
- }
-
- if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
- return err
- }
- if eas.rst {
- if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
- if l.side == clientSide {
- l.draining = true
- if len(l.estdStreams) == 0 {
- // Flush and close the connection; we are done with it.
- return errors.New("received GOAWAY with no active streams")
- }
- }
- return nil
-}
-
-func (l *loopyWriter) goAwayHandler(g *goAway) error {
- // Handling of outgoing GoAway is very specific to side.
- if l.ssGoAwayHandler != nil {
- draining, err := l.ssGoAwayHandler(g)
- if err != nil {
- return err
- }
- l.draining = draining
- }
- return nil
-}
-
-func (l *loopyWriter) handle(i any) error {
- switch i := i.(type) {
- case *incomingWindowUpdate:
- l.incomingWindowUpdateHandler(i)
- case *outgoingWindowUpdate:
- return l.outgoingWindowUpdateHandler(i)
- case *incomingSettings:
- return l.incomingSettingsHandler(i)
- case *outgoingSettings:
- return l.outgoingSettingsHandler(i)
- case *headerFrame:
- return l.headerHandler(i)
- case *registerStream:
- l.registerStreamHandler(i)
- case *cleanupStream:
- return l.cleanupStreamHandler(i)
- case *earlyAbortStream:
- return l.earlyAbortStreamHandler(i)
- case *incomingGoAway:
- return l.incomingGoAwayHandler(i)
- case *dataFrame:
- l.preprocessData(i)
- case *ping:
- return l.pingHandler(i)
- case *goAway:
- return l.goAwayHandler(i)
- case *outFlowControlSizeRequest:
- l.outFlowControlSizeRequestHandler(i)
- case closeConnection:
- // Just return a non-I/O error and run() will flush and close the
- // connection.
- return ErrConnClosing
- default:
- return fmt.Errorf("transport: unknown control message type %T", i)
- }
- return nil
-}
-
-func (l *loopyWriter) applySettings(ss []http2.Setting) {
- for _, s := range ss {
- switch s.ID {
- case http2.SettingInitialWindowSize:
- o := l.oiws
- l.oiws = s.Val
- if o < l.oiws {
- // If the new limit is greater make all depleted streams active.
- for _, stream := range l.estdStreams {
- if stream.state == waitingOnStreamQuota {
- stream.state = active
- l.activeStreams.enqueue(stream)
- }
- }
- }
- case http2.SettingHeaderTableSize:
- updateHeaderTblSize(l.hEnc, s.Val)
- }
- }
-}
-
-// processData removes the first stream from active streams, writes out at most
-// 16KB of its data, and then puts it at the end of activeStreams if there's
-// still more data to be sent and the stream has some stream-level flow control
-// quota left.
-func (l *loopyWriter) processData() (bool, error) {
- if l.sendQuota == 0 {
- return true, nil
- }
- str := l.activeStreams.dequeue() // Remove the first stream.
- if str == nil {
- return true, nil
- }
- dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
- // A data item is represented by a dataFrame, since it later translates into
- // multiple HTTP2 data frames.
- // Every dataFrame has two buffers: h, which holds the grpc-message header, and
- // data, which holds the actual message. As an optimization to keep wire traffic
- // low, bytes from data are copied into h so that each write is as close as
- // possible to the maximum HTTP2 frame size.
-
- if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
- // Client sends out empty data frame with endStream = true
- if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
- return false, err
- }
- str.itl.dequeue() // remove the empty data item from stream
- _ = dataItem.reader.Close()
- if str.itl.isEmpty() {
- str.state = empty
- } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
- if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
- return false, err
- }
- if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
- return false, err
- }
- } else {
- l.activeStreams.enqueue(str)
- }
- return false, nil
- }
-
- // Figure out the maximum size we can send
- maxSize := http2MaxFrameLen
- if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
- str.state = waitingOnStreamQuota
- return false, nil
- } else if maxSize > strQuota {
- maxSize = strQuota
- }
- if maxSize > int(l.sendQuota) { // connection-level flow control.
- maxSize = int(l.sendQuota)
- }
- // Compute how much of the header and data we can send within quota and max frame length
- hSize := min(maxSize, len(dataItem.h))
- dSize := min(maxSize-hSize, dataItem.reader.Remaining())
- remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
- size := hSize + dSize
-
- var buf *[]byte
-
- if hSize != 0 && dSize == 0 {
- buf = &dataItem.h
- } else {
- // Note: this is only necessary because the http2.Framer does not support
- // partially writing a frame, so the sequence must be materialized into a buffer.
- // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
- pool := l.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- buf = pool.Get(size)
- defer pool.Put(buf)
-
- copy((*buf)[:hSize], dataItem.h)
- _, _ = dataItem.reader.Read((*buf)[hSize:])
- }
-
- // Now that outgoing flow controls are checked, we can replenish str's write quota.
- str.wq.replenish(size)
- var endStream bool
- // If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && remainingBytes == 0 {
- endStream = true
- }
- if dataItem.onEachWrite != nil {
- dataItem.onEachWrite()
- }
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
- return false, err
- }
- str.bytesOutStanding += size
- l.sendQuota -= uint32(size)
- dataItem.h = dataItem.h[hSize:]
-
- if remainingBytes == 0 { // All the data from that message was written out.
- _ = dataItem.reader.Close()
- str.itl.dequeue()
- }
- if str.itl.isEmpty() {
- str.state = empty
- } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
- if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
- return false, err
- }
- if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
- return false, err
- }
- } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
- str.state = waitingOnStreamQuota
- } else { // Otherwise add it back to the list of active streams.
- l.activeStreams.enqueue(str)
- }
- return false, nil
-}
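
processData above is the heart of loopy's fairness: one frame per stream per pass, bounded by the smallest of the stream window, the connection window, and the 16KB frame limit. Below is a self-contained sketch of that round-robin discipline; the `stream` type, `roundRobinWrite`, and the quota values are simplified stand-ins, not the transport's actual structures.

```go
package main

import "fmt"

const maxFrameLen = 16 * 1024 // mirrors http2MaxFrameLen

// stream is a simplified stand-in for loopy's per-stream state.
type stream struct {
	id      uint32
	pending []byte // data not yet written
	quota   int    // stream-level flow-control quota
}

// roundRobinWrite drains streams one frame at a time, re-queueing any stream
// that still has data and quota, so no stream starves the others.
func roundRobinWrite(active []*stream, connQuota int) {
	for len(active) > 0 && connQuota > 0 {
		str := active[0]
		active = active[1:]
		n := min(len(str.pending), maxFrameLen, str.quota, connQuota)
		if n == 0 {
			continue // out of stream quota; the real code parks the stream
		}
		fmt.Printf("stream %d: write %d bytes\n", str.id, n)
		str.pending = str.pending[n:]
		str.quota -= n
		connQuota -= n
		if len(str.pending) > 0 && str.quota > 0 {
			active = append(active, str) // back of the line
		}
	}
}

func main() {
	roundRobinWrite([]*stream{
		{id: 1, pending: make([]byte, 40_000), quota: 65_535},
		{id: 3, pending: make([]byte, 20_000), quota: 65_535},
	}, 65_535)
}
```

Re-queueing at the tail rather than the head is what prevents a single high-volume stream from starving the rest.
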
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
deleted file mode 100644
index bc8ee0747..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "math"
- "time"
-)
-
-const (
- // The default value of flow control window size in HTTP2 spec.
- defaultWindowSize = 65535
- // The initial window size for flow control.
- initialWindowSize = defaultWindowSize // for an RPC
- infinity = time.Duration(math.MaxInt64)
- defaultClientKeepaliveTime = infinity
- defaultClientKeepaliveTimeout = 20 * time.Second
- defaultMaxStreamsClient = 100
- defaultMaxConnectionIdle = infinity
- defaultMaxConnectionAge = infinity
- defaultMaxConnectionAgeGrace = infinity
- defaultServerKeepaliveTime = 2 * time.Hour
- defaultServerKeepaliveTimeout = 20 * time.Second
- defaultKeepalivePolicyMinTime = 5 * time.Minute
- // max window limit set by HTTP2 Specs.
- maxWindowSize = math.MaxInt32
- // defaultWriteQuota is the default value for number of data
- // bytes that each stream can schedule before some of it being
- // flushed out.
- defaultWriteQuota = 64 * 1024
- defaultClientMaxHeaderListSize = uint32(16 << 20)
- defaultServerMaxHeaderListSize = uint32(16 << 20)
-)
-
-// MaxStreamID is the upper bound for the stream ID before the current
-// transport gracefully closes and new transport is created for subsequent RPCs.
-// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
-// integer. It's exported so that tests can override it.
-var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
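
MaxStreamID's 75% threshold exists because client-initiated HTTP/2 stream IDs are odd, monotonically increasing, and capped at 2^31-1; draining early leaves room for in-flight RPCs to finish on the old connection while a replacement is dialed. A minimal sketch of the allocation-and-drain check (`allocate` and the variable names are hypothetical):

```go
package main

import (
	"fmt"
	"math"
)

// maxID uses the same 75% formula as MaxStreamID above.
var maxID = uint32(math.MaxInt32 * 3 / 4)

func main() {
	nextID := uint32(1)
	allocate := func() (id uint32, drain bool) {
		id = nextID
		nextID += 2 // client-initiated stream IDs are odd: 1, 3, 5, ...
		return id, nextID > maxID
	}
	id, drain := allocate()
	// drain flips to true after ~805 million allocations, well before the
	// 31-bit stream-ID space is exhausted.
	fmt.Println(id, drain) // 1 false
}
```
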
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
deleted file mode 100644
index dfc0f224e..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "fmt"
- "math"
- "sync"
- "sync/atomic"
-)
-
-// writeQuota is a soft limit on the amount of data a stream can
-// schedule before some of it is written out.
-type writeQuota struct {
- quota int32
- // get waits on reads from ch when quota drops to zero or below;
- // replenish writes to it when quota goes positive again.
- ch chan struct{}
- // done is triggered in error case.
- done <-chan struct{}
- // replenish is called by loopyWriter to give quota back to the stream.
- // It is implemented as a field so that it can be updated
- // by tests.
- replenish func(n int)
-}
-
-func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
- w := &writeQuota{
- quota: sz,
- ch: make(chan struct{}, 1),
- done: done,
- }
- w.replenish = w.realReplenish
- return w
-}
-
-func (w *writeQuota) get(sz int32) error {
- for {
- if atomic.LoadInt32(&w.quota) > 0 {
- atomic.AddInt32(&w.quota, -sz)
- return nil
- }
- select {
- case <-w.ch:
- continue
- case <-w.done:
- return errStreamDone
- }
- }
-}
-
-func (w *writeQuota) realReplenish(n int) {
- sz := int32(n)
- a := atomic.AddInt32(&w.quota, sz)
- b := a - sz
- if b <= 0 && a > 0 {
- select {
- case w.ch <- struct{}{}:
- default:
- }
- }
-}
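
get spins on an atomic fast path and only blocks on ch when the quota is exhausted, while realReplenish signals ch exactly on the transition from non-positive to positive, so at most one wakeup is ever buffered. A hedged sketch of how a stream's sender cooperates with the writer through this type (`sendWithQuota` is hypothetical; it uses only the unexported API defined in this file):

```go
// Illustrative only: a sender acquires quota frame by frame before queuing
// data for loopy to write.
func sendWithQuota(wq *writeQuota, msg []byte) error {
	for len(msg) > 0 {
		n := min(len(msg), http2MaxFrameLen)
		if err := wq.get(int32(n)); err != nil {
			return err // done was closed; the stream is finished
		}
		// ... hand msg[:n] to the control buffer as a dataFrame ...
		msg = msg[n:]
	}
	return nil
}

// Meanwhile loopy returns quota as frames are flushed out:
//
//	str.wq.replenish(size) // see processData in controlbuf.go
```
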
-
-type trInFlow struct {
- limit uint32
- unacked uint32
- effectiveWindowSize uint32
-}
-
-func (f *trInFlow) newLimit(n uint32) uint32 {
- d := n - f.limit
- f.limit = n
- f.updateEffectiveWindowSize()
- return d
-}
-
-func (f *trInFlow) onData(n uint32) uint32 {
- f.unacked += n
- if f.unacked < f.limit/4 {
- f.updateEffectiveWindowSize()
- return 0
- }
- return f.reset()
-}
-
-func (f *trInFlow) reset() uint32 {
- w := f.unacked
- f.unacked = 0
- f.updateEffectiveWindowSize()
- return w
-}
-
-func (f *trInFlow) updateEffectiveWindowSize() {
- atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
-}
-
-func (f *trInFlow) getSize() uint32 {
- return atomic.LoadUint32(&f.effectiveWindowSize)
-}
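
trInFlow batches acknowledgements: onData defers the WINDOW_UPDATE until at least a quarter of the window is unacknowledged, trading a slightly smaller effective window for far fewer control frames. A standalone sketch of the threshold arithmetic (the closure mirrors onData above):

```go
package main

import "fmt"

func main() {
	const limit = 65_535
	unacked := uint32(0)
	// onData returns the WINDOW_UPDATE to send, or 0 while the
	// acknowledgement is still being batched.
	onData := func(n uint32) uint32 {
		unacked += n
		if unacked < limit/4 {
			return 0
		}
		w := unacked
		unacked = 0
		return w
	}
	fmt.Println(onData(10_000)) // 0: below limit/4 (16383), ack deferred
	fmt.Println(onData(10_000)) // 20000: threshold crossed, one batched update
}
```
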
-
-// TODO(mmukhi): Simplify this code.
-// inFlow deals with inbound flow control.
-type inFlow struct {
- mu sync.Mutex
- // The inbound flow control limit for pending data.
- limit uint32
- // pendingData is the data that has been received but not yet
- // consumed by the application.
- pendingData uint32
- // pendingUpdate is the amount of data the application has consumed, but for
- // which grpc has not yet sent a window update. Used to reduce window update
- // frequency.
- pendingUpdate uint32
- // delta is the extra window update given by the receiver when an application
- // is reading data bigger in size than the inFlow limit.
- delta uint32
-}
-
-// newLimit updates the inflow window to a new value n.
-// It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) {
- f.mu.Lock()
- f.limit = n
- f.mu.Unlock()
-}
-
-func (f *inFlow) maybeAdjust(n uint32) uint32 {
- if n > uint32(math.MaxInt32) {
- n = uint32(math.MaxInt32)
- }
- f.mu.Lock()
- defer f.mu.Unlock()
- // estSenderQuota is the receiver's view of the maximum number of bytes the sender
- // can send without a window update.
- estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
- // estUntransmittedData is the maximum number of bytes the sender might not
- // have put on the wire yet. A value of 0 or less means that we have already
- // received all or more bytes than the application is requesting to read.
- estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
- // This implies that unless we send a window update, the sender won't be able to send all the bytes
- // for this message. Therefore we must send an update over the limit since there's an active read
- // request from the application.
- if estUntransmittedData > estSenderQuota {
- // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
- if f.limit+n > maxWindowSize {
- f.delta = maxWindowSize - f.limit
- } else {
- // Send a window update for the whole message and not just the difference
- // between estUntransmittedData and estSenderQuota. This will be helpful in
- // case the message is padded; we will fall back on the currently available
- // window (at least a 1/4th of the limit).
- f.delta = n
- }
- return f.delta
- }
- return 0
-}
-
-// onData is invoked when some data frame is received. It updates pendingData.
-func (f *inFlow) onData(n uint32) error {
- f.mu.Lock()
- f.pendingData += n
- if f.pendingData+f.pendingUpdate > f.limit+f.delta {
- limit := f.limit
- rcvd := f.pendingData + f.pendingUpdate
- f.mu.Unlock()
- return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
- }
- f.mu.Unlock()
- return nil
-}
-
-// onRead is invoked when the application reads the data. It returns the window size
-// to be sent to the peer.
-func (f *inFlow) onRead(n uint32) uint32 {
- f.mu.Lock()
- if f.pendingData == 0 {
- f.mu.Unlock()
- return 0
- }
- f.pendingData -= n
- if n > f.delta {
- n -= f.delta
- f.delta = 0
- } else {
- f.delta -= n
- n = 0
- }
- f.pendingUpdate += n
- if f.pendingUpdate >= f.limit/4 {
- wu := f.pendingUpdate
- f.pendingUpdate = 0
- f.mu.Unlock()
- return wu
- }
- f.mu.Unlock()
- return 0
-}
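
The delta mechanism in maybeAdjust is easiest to follow with concrete numbers; the walkthrough below is illustrative (the 64KB window and 100,000-byte read are assumptions, not values from the code):

```go
// Illustrative walkthrough of maybeAdjust:
//
//	limit = 65535, pendingData = 0, pendingUpdate = 0, delta = 0
//	the application asks to read n = 100000 bytes (bigger than the window):
//	  estSenderQuota       = 65535 - (0 + 0) = 65535
//	  estUntransmittedData = 100000 - 0      = 100000  > estSenderQuota
//	  => delta = 100000 (limit+n is still under maxWindowSize), and the
//	     transport sends that extra window update up front.
//	onData may now accept up to limit+delta = 165535 bytes, and onRead
//	repays delta first, so the one-off grant never inflates pendingUpdate.
```
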
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
deleted file mode 100644
index 3dea23573..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file is the implementation of a gRPC server using HTTP/2 which
-// uses the standard Go http2 Server implementation (via the
-// http.Handler interface), rather than speaking low-level HTTP/2
-// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
-
-package transport
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http2"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/grpcutil"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/proto"
-)
-
-// NewServerHandlerTransport returns a ServerTransport handling gRPC from
-// inside an http.Handler, or writes an HTTP error to w and returns an error.
-// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
- if r.Method != http.MethodPost {
- w.Header().Set("Allow", http.MethodPost)
- msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
- http.Error(w, msg, http.StatusMethodNotAllowed)
- return nil, errors.New(msg)
- }
- contentType := r.Header.Get("Content-Type")
- // TODO: do we assume contentType is lowercase? we did before
- contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
- if !validContentType {
- msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
- http.Error(w, msg, http.StatusUnsupportedMediaType)
- return nil, errors.New(msg)
- }
- if r.ProtoMajor != 2 {
- msg := "gRPC requires HTTP/2"
- http.Error(w, msg, http.StatusHTTPVersionNotSupported)
- return nil, errors.New(msg)
- }
- if _, ok := w.(http.Flusher); !ok {
- msg := "gRPC requires a ResponseWriter supporting http.Flusher"
- http.Error(w, msg, http.StatusInternalServerError)
- return nil, errors.New(msg)
- }
-
- var localAddr net.Addr
- if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
- localAddr, _ = la.(net.Addr)
- }
- var authInfo credentials.AuthInfo
- if r.TLS != nil {
- authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
- }
- p := peer.Peer{
- Addr: strAddr(r.RemoteAddr),
- LocalAddr: localAddr,
- AuthInfo: authInfo,
- }
- st := &serverHandlerTransport{
- rw: w,
- req: r,
- closedCh: make(chan struct{}),
- writes: make(chan func()),
- peer: p,
- contentType: contentType,
- contentSubtype: contentSubtype,
- stats: stats,
- bufferPool: bufferPool,
- }
- st.logger = prefixLoggerForServerHandlerTransport(st)
-
- if v := r.Header.Get("grpc-timeout"); v != "" {
- to, err := decodeTimeout(v)
- if err != nil {
- msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
- http.Error(w, msg, http.StatusBadRequest)
- return nil, status.Error(codes.Internal, msg)
- }
- st.timeoutSet = true
- st.timeout = to
- }
-
- metakv := []string{"content-type", contentType}
- if r.Host != "" {
- metakv = append(metakv, ":authority", r.Host)
- }
- for k, vv := range r.Header {
- k = strings.ToLower(k)
- if isReservedHeader(k) && !isWhitelistedHeader(k) {
- continue
- }
- for _, v := range vv {
- v, err := decodeMetadataHeader(k, v)
- if err != nil {
- msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
- http.Error(w, msg, http.StatusBadRequest)
- return nil, status.Error(codes.Internal, msg)
- }
- metakv = append(metakv, k, v)
- }
- }
- st.headerMD = metadata.Pairs(metakv...)
-
- return st, nil
-}
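
NewServerHandlerTransport is the entry point behind (*grpc.Server).ServeHTTP, which lets a gRPC server share a listener with plain HTTP handlers at the cost of this slower net/http-based transport. A hedged sketch of that wiring (the route names and certificate paths are placeholders):

```go
package main

import (
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	// ... register services on grpcServer ...

	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// gRPC needs HTTP/2 and an application/grpc* content-type.
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r) // lands in NewServerHandlerTransport
			return
		}
		mux.ServeHTTP(w, r)
	})

	// net/http only negotiates HTTP/2 over TLS by default; the certificate
	// paths are placeholders.
	_ = http.ListenAndServeTLS(":8443", "server.crt", "server.key", root)
}
```
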
-
-// serverHandlerTransport is an implementation of ServerTransport
-// which replies to exactly one gRPC request (exactly one HTTP request),
-// using the net/http.Handler interface. This http.Handler is guaranteed
-// at this point to be speaking over HTTP/2, so it's able to speak valid
-// gRPC.
-type serverHandlerTransport struct {
- rw http.ResponseWriter
- req *http.Request
- timeoutSet bool
- timeout time.Duration
-
- headerMD metadata.MD
-
- peer peer.Peer
-
- closeOnce sync.Once
- closedCh chan struct{} // closed on Close
-
- // writes is a channel of code to run serialized in the
- // ServeHTTP (HandleStreams) goroutine. The channel is closed
- // when WriteStatus is called.
- writes chan func()
-
- // block concurrent WriteStatus calls
- // e.g. grpc/(*serverStream).SendMsg/RecvMsg
- writeStatusMu sync.Mutex
-
- // we just mirror the request content-type
- contentType string
- // we store both contentType and contentSubtype so we don't keep recreating them
- // TODO make sure this is consistent across handler_server and http2_server
- contentSubtype string
-
- stats []stats.Handler
- logger *grpclog.PrefixLogger
-
- bufferPool mem.BufferPool
-}
-
-func (ht *serverHandlerTransport) Close(err error) {
- ht.closeOnce.Do(func() {
- if ht.logger.V(logLevel) {
- ht.logger.Infof("Closing: %v", err)
- }
- close(ht.closedCh)
- })
-}
-
-func (ht *serverHandlerTransport) Peer() *peer.Peer {
- return &peer.Peer{
- Addr: ht.peer.Addr,
- LocalAddr: ht.peer.LocalAddr,
- AuthInfo: ht.peer.AuthInfo,
- }
-}
-
-// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
-// the empty string if unknown.
-type strAddr string
-
-func (a strAddr) Network() string {
- if a != "" {
- // Per the documentation on net/http.Request.RemoteAddr, if this is
- // set, it's set to the IP:port of the peer (hence, TCP):
- // https://golang.org/pkg/net/http/#Request
- //
- // If we want to support Unix sockets later, we can
- // add our own grpc-specific convention within the
- // grpc codebase to set RemoteAddr to a different
- // format, or probably better: we can attach it to the
- // context and use that from serverHandlerTransport.RemoteAddr.
- return "tcp"
- }
- return ""
-}
-
-func (a strAddr) String() string { return string(a) }
-
-// do runs fn in the ServeHTTP goroutine.
-func (ht *serverHandlerTransport) do(fn func()) error {
- select {
- case <-ht.closedCh:
- return ErrConnClosing
- case ht.writes <- fn:
- return nil
- }
-}
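
do implements a single-writer funnel: callers submit closures over an unbuffered channel to the one goroutine allowed to touch the http.ResponseWriter, and the select on closedCh ensures no caller blocks forever on a dead transport. The same pattern in isolation (`serialExecutor` is a generic sketch, not a transport type):

```go
package main

import (
	"errors"
	"fmt"
)

var errClosed = errors.New("executor closed")

// serialExecutor funnels closures to a single goroutine, mirroring how
// serverHandlerTransport.do feeds the ServeHTTP goroutine.
type serialExecutor struct {
	work   chan func()
	closed chan struct{}
}

// do schedules fn on the loop goroutine, or fails fast once closed.
func (e *serialExecutor) do(fn func()) error {
	select {
	case <-e.closed:
		return errClosed
	case e.work <- fn:
		return nil
	}
}

// run executes submitted closures one at a time until closed.
func (e *serialExecutor) run() {
	for {
		select {
		case fn := <-e.work:
			fn()
		case <-e.closed:
			return
		}
	}
}

func main() {
	e := &serialExecutor{work: make(chan func()), closed: make(chan struct{})}
	go e.run()
	done := make(chan struct{})
	_ = e.do(func() { fmt.Println("runs on the loop goroutine"); close(done) })
	<-done
	close(e.closed)
}
```
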
-
-func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
- ht.writeStatusMu.Lock()
- defer ht.writeStatusMu.Unlock()
-
- headersWritten := s.updateHeaderSent()
- err := ht.do(func() {
- if !headersWritten {
- ht.writePendingHeaders(s)
- }
-
- // And flush, in case no header or body has been sent yet.
- // This forces a separation of headers and trailers if this is the
- // first call (for example, in the end2end test TestNoService).
- ht.rw.(http.Flusher).Flush()
-
- h := ht.rw.Header()
- h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
- if m := st.Message(); m != "" {
- h.Set("Grpc-Message", encodeGrpcMessage(m))
- }
-
- s.hdrMu.Lock()
- defer s.hdrMu.Unlock()
- if p := st.Proto(); p != nil && len(p.Details) > 0 {
- delete(s.trailer, grpcStatusDetailsBinHeader)
- stBytes, err := proto.Marshal(p)
- if err != nil {
- // TODO: return error instead, when callers are able to handle it.
- panic(err)
- }
-
- h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
- }
-
- if len(s.trailer) > 0 {
- for k, vv := range s.trailer {
- // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- // http2 ResponseWriter mechanism to send undeclared Trailers after
- // the headers have possibly been written.
- h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
- }
- }
- }
- })
-
- if err == nil { // transport has not been closed
- // Note: The trailer fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- for _, sh := range ht.stats {
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
- Trailer: s.trailer.Copy(),
- })
- }
- }
- ht.Close(errors.New("finished writing status"))
- return err
-}
-
-// writePendingHeaders sets common and custom headers on the first
-// write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
- ht.writeCommonHeaders(s)
- ht.writeCustomHeaders(s)
-}
-
-// writeCommonHeaders sets common headers on the first write
-// call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
- h := ht.rw.Header()
- h["Date"] = nil // suppress Date to make tests happy; TODO: restore
- h.Set("Content-Type", ht.contentType)
-
- // Predeclare trailers we'll set later in WriteStatus (after the body).
- // This is a SHOULD in the HTTP RFC, and the way you add (known)
- // Trailers per the net/http.ResponseWriter contract.
- // See https://golang.org/pkg/net/http/#ResponseWriter
- // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
- h.Add("Trailer", "Grpc-Status")
- h.Add("Trailer", "Grpc-Message")
- h.Add("Trailer", "Grpc-Status-Details-Bin")
-
- if s.sendCompress != "" {
- h.Set("Grpc-Encoding", s.sendCompress)
- }
-}
-
-// writeCustomHeaders sets custom headers set on the stream via SetHeader
-// on the first write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
- h := ht.rw.Header()
-
- s.hdrMu.Lock()
- for k, vv := range s.header {
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- h.Add(k, encodeMetadataHeader(k, v))
- }
- }
-
- s.hdrMu.Unlock()
-}
-
-func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
- // Always take a reference because otherwise there is no guarantee the data will
- // be available after this function returns. This is what callers to Write
- // expect.
- data.Ref()
- headersWritten := s.updateHeaderSent()
- err := ht.do(func() {
- defer data.Free()
- if !headersWritten {
- ht.writePendingHeaders(s)
- }
- ht.rw.Write(hdr)
- for _, b := range data {
- _, _ = ht.rw.Write(b.ReadOnlyData())
- }
- ht.rw.(http.Flusher).Flush()
- })
- if err != nil {
- data.Free()
- return err
- }
- return nil
-}
-
-func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
- if err := s.SetHeader(md); err != nil {
- return err
- }
-
- headersWritten := s.updateHeaderSent()
- err := ht.do(func() {
- if !headersWritten {
- ht.writePendingHeaders(s)
- }
-
- ht.rw.WriteHeader(200)
- ht.rw.(http.Flusher).Flush()
- })
-
- if err == nil {
- for _, sh := range ht.stats {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutHeader{
- Header: md.Copy(),
- Compression: s.sendCompress,
- })
- }
- }
- return err
-}
-
-func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
- // With this transport type there will be exactly 1 stream: this HTTP request.
- var cancel context.CancelFunc
- if ht.timeoutSet {
- ctx, cancel = context.WithTimeout(ctx, ht.timeout)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
-
- // requestOver is closed when the status has been written via WriteStatus.
- requestOver := make(chan struct{})
- go func() {
- select {
- case <-requestOver:
- case <-ht.closedCh:
- case <-ht.req.Context().Done():
- }
- cancel()
- ht.Close(errors.New("request is done processing"))
- }()
-
- ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- req := ht.req
- s := &ServerStream{
- Stream: &Stream{
- id: 0, // irrelevant
- ctx: ctx,
- requestRead: func(int) {},
- buf: newRecvBuffer(),
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- },
- cancel: cancel,
- st: ht,
- headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
- }
- s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
- windowHandler: func(int) {},
- }
-
- // readerDone is closed when the Body.Read-ing goroutine exits.
- readerDone := make(chan struct{})
- go func() {
- defer close(readerDone)
-
- for {
- buf := ht.bufferPool.Get(http2MaxFrameLen)
- n, err := req.Body.Read(*buf)
- if n > 0 {
- *buf = (*buf)[:n]
- s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
- } else {
- ht.bufferPool.Put(buf)
- }
- if err != nil {
- s.buf.put(recvMsg{err: mapRecvMsgError(err)})
- return
- }
- }
- }()
-
- // startStream is provided by the *grpc.Server's serveStreams.
- // It starts a goroutine serving s and exits immediately.
- // The goroutine that is started is the one that then calls
- // into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
- startStream(s)
-
- ht.runStream()
- close(requestOver)
-
- // Wait for reading goroutine to finish.
- req.Body.Close()
- <-readerDone
-}
-
-func (ht *serverHandlerTransport) runStream() {
- for {
- select {
- case fn := <-ht.writes:
- fn()
- case <-ht.closedCh:
- return
- }
- }
-}
-
-func (ht *serverHandlerTransport) incrMsgRecv() {}
-
-func (ht *serverHandlerTransport) Drain(string) {
- panic("Drain() is not implemented")
-}
-
-// mapRecvMsgError maps a non-nil err to the appropriate error value as
-// expected by callers of *grpc.parser.recvMsg.
-// In particular, it can only be:
-// - io.EOF
-// - io.ErrUnexpectedEOF
-// - of type transport.ConnectionError
-// - an error from the status package
-func mapRecvMsgError(err error) error {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- return err
- }
- if se, ok := err.(http2.StreamError); ok {
- if code, ok := http2ErrConvTab[se.Code]; ok {
- return status.Error(code, se.Error())
- }
- }
- if strings.Contains(err.Error(), "body closed by handler") {
- return status.Error(codes.Canceled, err.Error())
- }
- return connectionErrorf(true, err, "%s", err.Error())
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
deleted file mode 100644
index 513dbb93d..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ /dev/null
@@ -1,1829 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "context"
- "fmt"
- "io"
- "math"
- "net"
- "net/http"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/channelz"
- icredentials "google.golang.org/grpc/internal/credentials"
- "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/grpcutil"
- imetadata "google.golang.org/grpc/internal/metadata"
- "google.golang.org/grpc/internal/proxyattributes"
- istatus "google.golang.org/grpc/internal/status"
- isyscall "google.golang.org/grpc/internal/syscall"
- "google.golang.org/grpc/internal/transport/networktype"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-// clientConnectionCounter counts the number of connections a client has
-// initiated (equal to the number of http2Clients created). Must be accessed
-// atomically.
-var clientConnectionCounter uint64
-
-var goAwayLoopyWriterTimeout = 5 * time.Second
-
-var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
-
-// http2Client implements the ClientTransport interface with HTTP2.
-type http2Client struct {
- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
- ctx context.Context
- cancel context.CancelFunc
- ctxDone <-chan struct{} // Cache the ctx.Done() chan.
- userAgent string
- // address contains the resolver returned address for this transport.
- // If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
- // passed to `NewStream`, when determining the :authority header.
- address resolver.Address
- md metadata.MD
- conn net.Conn // underlying communication channel
- loopy *loopyWriter
- remoteAddr net.Addr
- localAddr net.Addr
- authInfo credentials.AuthInfo // auth info about the connection
-
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
- // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
- // that the server sent GoAway on this transport.
- goAway chan struct{}
- keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
- framer *framer
- // controlBuf delivers all the control related tasks (e.g., window
- // updates, reset streams, and various settings) to the controller.
- // Do not access controlBuf with mu held.
- controlBuf *controlBuffer
- fc *trInFlow
- // The scheme used: https if TLS is on, http otherwise.
- scheme string
-
- isSecure bool
-
- perRPCCreds []credentials.PerRPCCredentials
-
- kp keepalive.ClientParameters
- keepaliveEnabled bool
-
- statsHandlers []stats.Handler
-
- initialWindowSize int32
-
- // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
- maxSendHeaderListSize *uint32
-
- bdpEst *bdpEstimator
-
- maxConcurrentStreams uint32
- streamQuota int64
- streamsQuotaAvailable chan struct{}
- waitingStreams uint32
- registeredCompressors string
-
- // Do not access controlBuf with mu held.
- mu sync.Mutex // guard the following variables
- nextID uint32
- state transportState
- activeStreams map[uint32]*ClientStream
- // prevGoAwayID records the Last-Stream-ID in the previous GOAWAY frame.
- prevGoAwayID uint32
- // goAwayReason records the http2.ErrCode and debug data received with the
- // GoAway frame.
- goAwayReason GoAwayReason
- // goAwayDebugMessage contains a detailed human readable string about a
- // GoAway frame, useful for error messages.
- goAwayDebugMessage string
- // A condition variable used to signal when the keepalive goroutine should
- // go dormant. The condition for dormancy is based on the number of active
- // streams and the `PermitWithoutStream` keepalive client parameter. And
- // since the number of active streams is guarded by the above mutex, we use
- // the same for this condition variable as well.
- kpDormancyCond *sync.Cond
- // A boolean to track whether the keepalive goroutine is dormant or not.
- // This is checked before attempting to signal the above condition
- // variable.
- kpDormant bool
-
- channelz *channelz.Socket
-
- onClose func(GoAwayReason)
-
- bufferPool mem.BufferPool
-
- connectionID uint64
- logger *grpclog.PrefixLogger
-}
-
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) {
- address := addr.Addr
- networkType, ok := networktype.Get(addr)
- if fn != nil {
- // Special handling for unix scheme with custom dialer. Back in the day,
- // we did not have a unix resolver and therefore targets with a unix
- // scheme would end up using the passthrough resolver. So, users used a
- // custom dialer in this case and expected the original dial target to
- // be passed to the custom dialer. Now, we have a unix resolver. But if
- // a custom dialer is specified, we want to retain the old behavior in
- // terms of the address being passed to the custom dialer.
- if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
- // Supported unix targets are either "unix://absolute-path" or
- // "unix:relative-path".
- if filepath.IsAbs(address) {
- return fn(ctx, "unix://"+address)
- }
- return fn(ctx, "unix:"+address)
- }
- return fn(ctx, address)
- }
- if !ok {
- networkType, address = parseDialTarget(address)
- }
- if opts, present := proxyattributes.Get(addr); present {
- return proxyDial(ctx, addr, grpcUA, opts)
- }
- return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
-}
-
-func isTemporary(err error) bool {
- switch err := err.(type) {
- case interface {
- Temporary() bool
- }:
- return err.Temporary()
- case interface {
- Timeout() bool
- }:
- // Timeouts may be resolved upon retry, and are thus treated as
- // temporary.
- return err.Timeout()
- }
- return true
-}
-
-// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
-// and starts to receive messages on it. Non-nil error returns if construction
-// fails.
-func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
- scheme := "http"
- ctx, cancel := context.WithCancel(ctx)
- defer func() {
- if err != nil {
- cancel()
- }
- }()
-
- // gRPC, resolver, balancer etc. can specify arbitrary data in the
- // Attributes field of resolver.Address, which is shoved into connectCtx
- // and passed to the dialer and credential handshaker. This makes it possible for
- // address-specific arbitrary data to reach custom dialers and credential handshakers.
- connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
-
- conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent)
- if err != nil {
- if opts.FailOnNonTempDialError {
- return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
- }
- return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
- }
-
- // Any further errors will close the underlying connection
- defer func(conn net.Conn) {
- if err != nil {
- conn.Close()
- }
- }(conn)
-
- // The following defer and goroutine monitor the connectCtx for cancellation
- // and deadline. On context expiration, the connection is hard closed and
- // this function will naturally fail as a result. Otherwise, the defer
- // waits for the goroutine to exit to prevent the context from being
- // monitored (and to prevent the connection from ever being closed) after
- // returning from this function.
- ctxMonitorDone := grpcsync.NewEvent()
- newClientCtx, newClientDone := context.WithCancel(connectCtx)
- defer func() {
- newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
- <-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
- }()
- go func(conn net.Conn) {
- defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
- <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
- if err := connectCtx.Err(); err != nil {
- // connectCtx expired before exiting the function. Hard close the connection.
- if logger.V(logLevel) {
- logger.Infof("Aborting due to connect deadline expiring: %v", err)
- }
- conn.Close()
- }
- }(conn)
-
- kp := opts.KeepaliveParams
- // Validate keepalive parameters.
- if kp.Time == 0 {
- kp.Time = defaultClientKeepaliveTime
- }
- if kp.Timeout == 0 {
- kp.Timeout = defaultClientKeepaliveTimeout
- }
- keepaliveEnabled := false
- if kp.Time != infinity {
- if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
- return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
- }
- keepaliveEnabled = true
- }
- var (
- isSecure bool
- authInfo credentials.AuthInfo
- )
- transportCreds := opts.TransportCredentials
- perRPCCreds := opts.PerRPCCredentials
-
- if b := opts.CredsBundle; b != nil {
- if t := b.TransportCredentials(); t != nil {
- transportCreds = t
- }
- if t := b.PerRPCCredentials(); t != nil {
- perRPCCreds = append(perRPCCreds, t)
- }
- }
- if transportCreds != nil {
- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
- if err != nil {
- return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
- }
- for _, cd := range perRPCCreds {
- if cd.RequireTransportSecurity() {
- if ci, ok := authInfo.(interface {
- GetCommonAuthInfo() credentials.CommonAuthInfo
- }); ok {
- secLevel := ci.GetCommonAuthInfo().SecurityLevel
- if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
- return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
- }
- }
- }
- }
- isSecure = true
- if transportCreds.Info().SecurityProtocol == "tls" {
- scheme = "https"
- }
- }
- dynamicWindow := true
- icwz := int32(initialWindowSize)
- if opts.InitialConnWindowSize >= defaultWindowSize {
- icwz = opts.InitialConnWindowSize
- dynamicWindow = false
- }
- writeBufSize := opts.WriteBufferSize
- readBufSize := opts.ReadBufferSize
- maxHeaderListSize := defaultClientMaxHeaderListSize
- if opts.MaxHeaderListSize != nil {
- maxHeaderListSize = *opts.MaxHeaderListSize
- }
-
- t := &http2Client{
- ctx: ctx,
- ctxDone: ctx.Done(), // Cache Done chan.
- cancel: cancel,
- userAgent: opts.UserAgent,
- registeredCompressors: grpcutil.RegisteredCompressors(),
- address: addr,
- conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: authInfo,
- readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
- goAway: make(chan struct{}),
- keepaliveDone: make(chan struct{}),
- framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
- fc: &trInFlow{limit: uint32(icwz)},
- scheme: scheme,
- activeStreams: make(map[uint32]*ClientStream),
- isSecure: isSecure,
- perRPCCreds: perRPCCreds,
- kp: kp,
- statsHandlers: opts.StatsHandlers,
- initialWindowSize: initialWindowSize,
- nextID: 1,
- maxConcurrentStreams: defaultMaxStreamsClient,
- streamQuota: defaultMaxStreamsClient,
- streamsQuotaAvailable: make(chan struct{}, 1),
- keepaliveEnabled: keepaliveEnabled,
- bufferPool: opts.BufferPool,
- onClose: onClose,
- }
- var czSecurity credentials.ChannelzSecurityValue
- if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
- czSecurity = au.GetSecurityValue()
- }
- t.channelz = channelz.RegisterSocket(
- &channelz.Socket{
- SocketType: channelz.SocketTypeNormal,
- Parent: opts.ChannelzParent,
- SocketMetrics: channelz.SocketMetrics{},
- EphemeralMetrics: t.socketMetrics,
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- SocketOptions: channelz.GetSocketOption(t.conn),
- Security: czSecurity,
- })
- t.logger = prefixLoggerForClientTransport(t)
- // Add peer information to the http2client context.
- t.ctx = peer.NewContext(t.ctx, t.getPeer())
-
- if md, ok := addr.Metadata.(*metadata.MD); ok {
- t.md = *md
- } else if md := imetadata.Get(addr); md != nil {
- t.md = md
- }
- t.controlBuf = newControlBuffer(t.ctxDone)
- if opts.InitialWindowSize >= defaultWindowSize {
- t.initialWindowSize = opts.InitialWindowSize
- dynamicWindow = false
- }
- if dynamicWindow {
- t.bdpEst = &bdpEstimator{
- bdp: initialWindowSize,
- updateFlowControl: t.updateFlowControl,
- }
- }
- for _, sh := range t.statsHandlers {
- t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- })
- connBegin := &stats.ConnBegin{
- Client: true,
- }
- sh.HandleConn(t.ctx, connBegin)
- }
- if t.keepaliveEnabled {
- t.kpDormancyCond = sync.NewCond(&t.mu)
- go t.keepalive()
- }
-
- // Start the reader goroutine for incoming messages. Each transport has a
- // dedicated goroutine which reads HTTP2 frames from the network. Then it
- // dispatches the frame to the corresponding stream entity. When the
- // server preface is received, readerErrCh is closed. If an error occurs
- // first, an error is pushed to the channel. This must be checked before
- // returning from this function.
- readerErrCh := make(chan error, 1)
- go t.reader(readerErrCh)
- defer func() {
- if err != nil {
- // writerDone should be closed since the loopy goroutine
- // wouldn't have started if this function returns an error.
- close(t.writerDone)
- t.Close(err)
- }
- }()
-
- // Send connection preface to server.
- n, err := t.conn.Write(clientPreface)
- if err != nil {
- err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
- return nil, err
- }
- if n != len(clientPreface) {
- err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
- return nil, err
- }
- var ss []http2.Setting
-
- if t.initialWindowSize != defaultWindowSize {
- ss = append(ss, http2.Setting{
- ID: http2.SettingInitialWindowSize,
- Val: uint32(t.initialWindowSize),
- })
- }
- if opts.MaxHeaderListSize != nil {
- ss = append(ss, http2.Setting{
- ID: http2.SettingMaxHeaderListSize,
- Val: *opts.MaxHeaderListSize,
- })
- }
- err = t.framer.fr.WriteSettings(ss...)
- if err != nil {
- err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
- return nil, err
- }
- // Adjust the connection flow control window if needed.
- if delta := uint32(icwz - defaultWindowSize); delta > 0 {
- if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
- err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
- return nil, err
- }
- }
-
- t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
-
- if err := t.framer.writer.Flush(); err != nil {
- return nil, err
- }
- // Block until the server preface is received successfully or an error occurs.
- if err = <-readerErrCh; err != nil {
- return nil, err
- }
- go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
- if err := t.loopy.run(); !isIOError(err) {
- // Immediately close the connection, as the loopy writer returns
- // when there are no more active streams and we were draining (the
- // server sent a GOAWAY). For I/O errors, the reader will hit it
- // after draining any remaining incoming data.
- t.conn.Close()
- }
- close(t.writerDone)
- }()
- return t, nil
-}
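
Everything up to the loopy start is the standard HTTP/2 client bootstrap: write the 24-byte preface, send SETTINGS, optionally grow the connection window with a WINDOW_UPDATE on stream 0, and wait for the server's SETTINGS. The same handshake against the raw golang.org/x/net/http2 framer would look roughly like this (a sketch with error handling abbreviated; the window sizes are arbitrary):

```go
package transportdemo

import (
	"net"

	"golang.org/x/net/http2"
)

// handshake performs the client side of the HTTP/2 connection bootstrap.
func handshake(conn net.Conn) error {
	// 1. The fixed 24-byte client preface ("PRI * HTTP/2.0...").
	if _, err := conn.Write([]byte(http2.ClientPreface)); err != nil {
		return err
	}
	fr := http2.NewFramer(conn, conn)
	// 2. Initial SETTINGS, e.g. a larger per-stream window.
	if err := fr.WriteSettings(http2.Setting{
		ID:  http2.SettingInitialWindowSize,
		Val: 1 << 20, // arbitrary 1 MiB stream window
	}); err != nil {
		return err
	}
	// 3. Optionally grow the connection-level window (stream 0) past the
	// 65535-byte default.
	if err := fr.WriteWindowUpdate(0, (1<<20)-65_535); err != nil {
		return err
	}
	// 4. A well-behaved server's first frame is its SETTINGS; the real
	// transport reads it on a dedicated reader goroutine.
	_, err := fr.ReadFrame()
	return err
}
```
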
-
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
- // TODO(zhaoq): Handle uint32 overflow of Stream.id.
- s := &ClientStream{
- Stream: &Stream{
- method: callHdr.Method,
- sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
- contentSubtype: callHdr.ContentSubtype,
- },
- ct: t,
- done: make(chan struct{}),
- headerChan: make(chan struct{}),
- doneFunc: callHdr.DoneFunc,
- }
- s.wq = newWriteQuota(defaultWriteQuota, s.done)
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
- // The client-side stream context should have exactly the same lifecycle as
- // the user-provided context. That means s.ctx should be read-only, and s.ctx
- // is done iff ctx is done. So we use the original context here instead of
- // creating a copy.
- s.ctx = ctx
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctx.Done(),
- recv: s.buf,
- closeStream: func(err error) {
- s.Close(err)
- },
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
- },
- }
- return s
-}
-
-func (t *http2Client) getPeer() *peer.Peer {
- return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo, // Can be nil
- LocalAddr: t.localAddr,
- }
-}
-
-// outgoingGoAwayHandler writes a GOAWAY to the connection. It always returns
-// (false, err) as we want the GOAWAY to be the last frame loopy writes to the
-// transport.
-func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
- t.mu.Lock()
- maxStreamID := t.nextID - 2
- t.mu.Unlock()
- if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
- return false, err
- }
- return false, g.closeConn
-}
-
-func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
- aud := t.createAudience(callHdr)
- ri := credentials.RequestInfo{
- Method: callHdr.Method,
- AuthInfo: t.authInfo,
- }
- ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
- authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
- if err != nil {
- return nil, err
- }
- callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
- if err != nil {
- return nil, err
- }
- // TODO(mmukhi): Benchmark whether performance gets better if we count the
- // metadata and other header fields first and create a slice of that exact size.
- // Make the slice a predictable size to reduce allocations made by append.
- hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
- hfLen += len(authData) + len(callAuthData)
- headerFields := make([]hpack.HeaderField, 0, hfLen)
- headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
- headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
- headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
- if callHdr.PreviousAttempts > 0 {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
- }
-
- registeredCompressors := t.registeredCompressors
- if callHdr.SendCompress != "" {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
- // Include the outgoing compressor name when compressor is not registered
- // via encoding.RegisterCompressor. This is possible when client uses
- // WithCompressor dial option.
- if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
- if registeredCompressors != "" {
- registeredCompressors += ","
- }
- registeredCompressors += callHdr.SendCompress
- }
- }
-
- if registeredCompressors != "" {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
- }
- if dl, ok := ctx.Deadline(); ok {
- // Send out the timeout regardless of its value. The server can detect a timeout context by itself.
- // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
- timeout := time.Until(dl)
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
- }
- for k, v := range authData {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- for k, v := range callAuthData {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
-
- if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
- var k string
- for k, vv := range md {
- // HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers have been set.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- for _, vv := range added {
- for i, v := range vv {
- if i%2 == 0 {
- k = strings.ToLower(v)
- continue
- }
- // HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers have been set.
- if isReservedHeader(k) {
- continue
- }
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- }
- for k, vv := range t.md {
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- return headerFields, nil
-}
-
-func (t *http2Client) createAudience(callHdr *CallHdr) string {
- // Create an audience string only if needed.
- if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
- return ""
- }
- // Construct URI required to get auth request metadata.
- // Omit port if it is the default one.
- host := strings.TrimSuffix(callHdr.Host, ":443")
- pos := strings.LastIndex(callHdr.Method, "/")
- if pos == -1 {
- pos = len(callHdr.Method)
- }
- return "https://" + host + callHdr.Method[:pos]
-}
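
The audience is the service URI: the default :443 port is trimmed and the method name is cut off the RPC path, so per-RPC credentials (such as OAuth token sources) see one audience per service. A worked example with hypothetical names:

```go
// Hypothetical inputs to createAudience:
//
//	callHdr.Host   = "example.com:443"
//	callHdr.Method = "/helloworld.Greeter/SayHello"
//
//	host = "example.com"              // default ":443" trimmed
//	pos  = last '/'                   // cuts off "SayHello"
//	=> audience = "https://example.com/helloworld.Greeter"
```
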
-
-func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
- if len(t.perRPCCreds) == 0 {
- return nil, nil
- }
- authData := map[string]string{}
- for _, c := range t.perRPCCreds {
- data, err := c.GetRequestMetadata(ctx, audience)
- if err != nil {
- if st, ok := status.FromError(err); ok {
- // Restrict the code to the list allowed by gRFC A54.
- if istatus.IsRestrictedControlPlaneCode(st) {
- err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
- }
- return nil, err
- }
-
- return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
- }
- for k, v := range data {
- // Capital header names are illegal in HTTP/2.
- k = strings.ToLower(k)
- authData[k] = v
- }
- }
- return authData, nil
-}
-
-func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
- var callAuthData map[string]string
- // Check if credentials.PerRPCCredentials were provided via call options.
- // Note: if these credentials are provided both via dial options and call
- // options, then both sets of credentials will be applied.
- if callCreds := callHdr.Creds; callCreds != nil {
- if callCreds.RequireTransportSecurity() {
- ri, _ := credentials.RequestInfoFromContext(ctx)
- if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
- return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
- }
- }
- data, err := callCreds.GetRequestMetadata(ctx, audience)
- if err != nil {
- if st, ok := status.FromError(err); ok {
- // Restrict the code to the list allowed by gRFC A54.
- if istatus.IsRestrictedControlPlaneCode(st) {
- err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
- }
- return nil, err
- }
- return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
- }
- callAuthData = make(map[string]string, len(data))
- for k, v := range data {
- // Capital header names are illegal in HTTP/2
- k = strings.ToLower(k)
- callAuthData[k] = v
- }
- }
- return callAuthData, nil
-}
-
-// NewStreamError wraps an error and reports additional information. Typically
-// NewStream errors result in transparent retry, as they mean nothing went onto
-// the wire. However, there are two notable exceptions:
-//
-// 1. If the stream headers violate the max header list size allowed by the
-// server. It's possible this could succeed on another transport, even if
-// it's unlikely, but do not transparently retry.
-// 2. If the credentials errored when requesting their headers. In this case,
-// it's possible a retry can fix the problem, but indefinitely transparently
-// retrying is not appropriate as it is likely the credentials, if they can
-// eventually succeed, would need I/O to do so.
-type NewStreamError struct {
- Err error
-
- AllowTransparentRetry bool
-}
-
-func (e NewStreamError) Error() string {
- return e.Err.Error()
-}
-
-// NewStream creates a stream and registers it into the transport as "active"
-// streams. All non-nil errors returned will be *NewStreamError.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
- ctx = peer.NewContext(ctx, t.getPeer())
-
- // The ServerName field of the resolver-returned address takes precedence
- // over the Host field of CallHdr when determining the :authority header.
- // This is because the ServerName field takes precedence for server
- // authentication during the TLS handshake, and the :authority header
- // should match the value used for server authentication.
- if t.address.ServerName != "" {
- newCallHdr := *callHdr
- newCallHdr.Host = t.address.ServerName
- callHdr = &newCallHdr
- }
-
- headerFields, err := t.createHeaderFields(ctx, callHdr)
- if err != nil {
- return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
- }
- s := t.newStream(ctx, callHdr)
- cleanup := func(err error) {
- if s.swapState(streamDone) == streamDone {
- // If it was already done, return.
- return
- }
- // The stream was unprocessed by the server.
- s.unprocessed.Store(true)
- s.write(recvMsg{err: err})
- close(s.done)
- // If headerChan isn't closed, then close it.
- if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
- close(s.headerChan)
- }
- }
- hdr := &headerFrame{
- hf: headerFields,
- endStream: false,
- initStream: func(uint32) error {
- t.mu.Lock()
- // TODO: handle transport closure in loopy instead and remove this.
- // initStream is never called when the transport is draining.
- if t.state == closing {
- t.mu.Unlock()
- cleanup(ErrConnClosing)
- return ErrConnClosing
- }
- if channelz.IsOn() {
- t.channelz.SocketMetrics.StreamsStarted.Add(1)
- t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
- }
- // If the keepalive goroutine has gone dormant, wake it up.
- if t.kpDormant {
- t.kpDormancyCond.Signal()
- }
- t.mu.Unlock()
- return nil
- },
- onOrphaned: cleanup,
- wq: s.wq,
- }
- firstTry := true
- var ch chan struct{}
- transportDrainRequired := false
- checkForStreamQuota := func() bool {
- if t.streamQuota <= 0 { // Can go negative if server decreases it.
- if firstTry {
- t.waitingStreams++
- }
- ch = t.streamsQuotaAvailable
- return false
- }
- if !firstTry {
- t.waitingStreams--
- }
- t.streamQuota--
-
- t.mu.Lock()
- if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
- t.mu.Unlock()
- return false // Don't create a stream if the transport is already closed.
- }
-
- hdr.streamID = t.nextID
- t.nextID += 2
- // Drain the client transport if nextID > MaxStreamID, which signals to
- // gRPC that the connection is closed and a new one must be created for
- // subsequent RPCs.
- transportDrainRequired = t.nextID > MaxStreamID
-
- s.id = hdr.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
- t.activeStreams[s.id] = s
- t.mu.Unlock()
-
- if t.streamQuota > 0 && t.waitingStreams > 0 {
- select {
- case t.streamsQuotaAvailable <- struct{}{}:
- default:
- }
- }
- return true
- }
- var hdrListSizeErr error
- checkForHeaderListSize := func() bool {
- if t.maxSendHeaderListSize == nil {
- return true
- }
- var sz int64
- for _, f := range hdr.hf {
- if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
- return false
- }
- }
- return true
- }
- for {
- success, err := t.controlBuf.executeAndPut(func() bool {
- return checkForHeaderListSize() && checkForStreamQuota()
- }, hdr)
- if err != nil {
- // Connection closed.
- return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
- }
- if success {
- break
- }
- if hdrListSizeErr != nil {
- return nil, &NewStreamError{Err: hdrListSizeErr}
- }
- firstTry = false
- select {
- case <-ch:
- case <-ctx.Done():
- return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
- case <-t.goAway:
- return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
- case <-t.ctx.Done():
- return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
- }
- }
- if len(t.statsHandlers) != 0 {
- header, ok := metadata.FromOutgoingContext(ctx)
- if ok {
- header.Set("user-agent", t.userAgent)
- } else {
- header = metadata.Pairs("user-agent", t.userAgent)
- }
- for _, sh := range t.statsHandlers {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- // Note: Creating a new stats object to prevent pollution.
- outHeader := &stats.OutHeader{
- Client: true,
- FullMethod: callHdr.Method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: callHdr.SendCompress,
- Header: header,
- }
- sh.HandleRPC(s.ctx, outHeader)
- }
- }
- if transportDrainRequired {
- if t.logger.V(logLevel) {
- t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
- }
- t.GracefulClose()
- }
- return s, nil
-}
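-
-// Illustrative sketch (not part of the upstream file): client-initiated
-// stream IDs are odd and grow by two per stream, as in checkForStreamQuota
-// above; once the next ID would pass the maximum, the transport must drain
-// and subsequent RPCs need a new connection. Names here are hypothetical.
-func allocateStreamID(next *uint32, maxID uint32) (id uint32, drainAfter bool) {
- id = *next
- *next += 2
- return id, *next > maxID
-}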
-
-func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
- // Set stream status to done.
- if s.swapState(streamDone) == streamDone {
- // If it was already done, return. If multiple closeStream calls
- // happen simultaneously, wait for the first to finish.
- <-s.done
- return
- }
- // The status and trailers can be updated here without synchronization
- // because the stream goroutine only reads them after it sees an io.EOF
- // error from read or write, and we write those errors only after updating
- // them.
- s.status = st
- if len(mdata) > 0 {
- s.trailer = mdata
- }
- if err != nil {
- // This will unblock reads eventually.
- s.write(recvMsg{err: err})
- }
- // If headerChan isn't closed, then close it.
- if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
- s.noHeaders = true
- close(s.headerChan)
- }
- cleanup := &cleanupStream{
- streamID: s.id,
- onWrite: func() {
- t.mu.Lock()
- if t.activeStreams != nil {
- delete(t.activeStreams, s.id)
- }
- t.mu.Unlock()
- if channelz.IsOn() {
- if eosReceived {
- t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
- } else {
- t.channelz.SocketMetrics.StreamsFailed.Add(1)
- }
- }
- },
- rst: rst,
- rstCode: rstCode,
- }
- addBackStreamQuota := func() bool {
- t.streamQuota++
- if t.streamQuota > 0 && t.waitingStreams > 0 {
- select {
- case t.streamsQuotaAvailable <- struct{}{}:
- default:
- }
- }
- return true
- }
- t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
- // This will unblock write.
- close(s.done)
- if s.doneFunc != nil {
- s.doneFunc()
- }
-}
-
-// Close kicks off the shutdown process of the transport. This should be called
-// only once on a transport. Once it is called, the transport should not be
-// accessed anymore.
-func (t *http2Client) Close(err error) {
- t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
- t.mu.Lock()
- // Make sure we only close once.
- if t.state == closing {
- t.mu.Unlock()
- return
- }
- if t.logger.V(logLevel) {
- t.logger.Infof("Closing: %v", err)
- }
- // Call t.onClose ASAP to prevent the client from attempting to create new
- // streams.
- if t.state != draining {
- t.onClose(GoAwayInvalid)
- }
- t.state = closing
- streams := t.activeStreams
- t.activeStreams = nil
- if t.kpDormant {
- // If the keepalive goroutine is blocked on this condition variable, we
- // should unblock it so that the goroutine eventually exits.
- t.kpDormancyCond.Signal()
- }
- // Append info about previous goaways, if there were any, since this may be
- // important for understanding the root cause of this connection being
- // closed.
- goAwayDebugMessage := t.goAwayDebugMessage
- t.mu.Unlock()
-
- // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
- // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
- // also waits for loopyWriter to be closed with a timer to avoid the
- // long blocking in case the connection is blackholed, i.e. TCP is
- // just stuck.
- t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
- timer := time.NewTimer(goAwayLoopyWriterTimeout)
- defer timer.Stop()
- select {
- case <-t.writerDone: // success
- case <-timer.C:
- t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
- }
- t.cancel()
- t.conn.Close()
- // Waits for the reader and keepalive goroutines to exit before returning to
- // ensure all resources are cleaned up before Close can return.
- <-t.readerDone
- if t.keepaliveEnabled {
- <-t.keepaliveDone
- }
- channelz.RemoveEntry(t.channelz.ID)
- var st *status.Status
- if len(goAwayDebugMessage) > 0 {
- st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
- err = st.Err()
- } else {
- st = status.New(codes.Unavailable, err.Error())
- }
-
- // Notify all active streams.
- for _, s := range streams {
- t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
- }
- for _, sh := range t.statsHandlers {
- connEnd := &stats.ConnEnd{
- Client: true,
- }
- sh.HandleConn(t.ctx, connEnd)
- }
-}
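-
-// Illustrative sketch (not part of the upstream file) of the bounded wait
-// used above for the GOAWAY write: block until done closes, but give up
-// after the supplied timeout so a blackholed connection cannot stall Close.
-func waitWithTimeout(done <-chan struct{}, d time.Duration) bool {
- timer := time.NewTimer(d)
- defer timer.Stop()
- select {
- case <-done:
-  return true
- case <-timer.C:
-  return false
- }
-}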
-
-// GracefulClose sets the state to draining, which prevents new streams from
-// being created and causes the transport to be closed when the last active
-// stream is closed. If there are no active streams, the transport is closed
-// immediately. This does nothing if the transport is already draining or
-// closing.
-func (t *http2Client) GracefulClose() {
- t.mu.Lock()
- // Make sure we move to draining only from active.
- if t.state == draining || t.state == closing {
- t.mu.Unlock()
- return
- }
- if t.logger.V(logLevel) {
- t.logger.Infof("GracefulClose called")
- }
- t.onClose(GoAwayInvalid)
- t.state = draining
- active := len(t.activeStreams)
- t.mu.Unlock()
- if active == 0 {
- t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
- return
- }
- t.controlBuf.put(&incomingGoAway{})
-}
-
-// write formats the data into HTTP/2 data frame(s) and sends them out. The
-// caller should proceed only if write returns nil.
-func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
- reader := data.Reader()
-
- if opts.Last {
- // If it's the last message, update stream state.
- if !s.compareAndSwapState(streamActive, streamWriteDone) {
- _ = reader.Close()
- return errStreamDone
- }
- } else if s.getState() != streamActive {
- _ = reader.Close()
- return errStreamDone
- }
- df := &dataFrame{
- streamID: s.id,
- endStream: opts.Last,
- h: hdr,
- reader: reader,
- }
- if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
- if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
- _ = reader.Close()
- return err
- }
- }
- if err := t.controlBuf.put(df); err != nil {
- _ = reader.Close()
- return err
- }
- t.incrMsgSent()
- return nil
-}
-
-func (t *http2Client) getStream(f http2.Frame) *ClientStream {
- t.mu.Lock()
- s := t.activeStreams[f.Header().StreamID]
- t.mu.Unlock()
- return s
-}
-
-// adjustWindow sends out extra window update over the initial window size
-// of stream if the application is requesting data larger in size than
-// the window.
-func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
- if w := s.fc.maybeAdjust(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
- }
-}
-
-// updateWindow adjusts the inbound quota for the stream.
-// Window updates will be sent out when the cumulative quota
-// exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *ClientStream, n uint32) {
- if w := s.fc.onRead(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
- }
-}
-
-// updateFlowControl updates the incoming flow control windows
-// for the transport and the stream based on the current bdp
-// estimation.
-func (t *http2Client) updateFlowControl(n uint32) {
- updateIWS := func() bool {
- t.initialWindowSize = int32(n)
- t.mu.Lock()
- for _, s := range t.activeStreams {
- s.fc.newLimit(n)
- }
- t.mu.Unlock()
- return true
- }
- t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
- t.controlBuf.put(&outgoingSettings{
- ss: []http2.Setting{
- {
- ID: http2.SettingInitialWindowSize,
- Val: n,
- },
- },
- })
-}
-
-func (t *http2Client) handleData(f *http2.DataFrame) {
- size := f.Header().Length
- var sendBDPPing bool
- if t.bdpEst != nil {
- sendBDPPing = t.bdpEst.add(size)
- }
- // Decouple connection's flow control from application's read.
- // An update on connection's flow control should not depend on
- // whether user application has read the data or not. Such a
- // restriction is already imposed on the stream's flow control,
- // and therefore the sender will be blocked anyways.
- // Decoupling the connection flow control will prevent other
- // active(fast) streams from starving in presence of slow or
- // inactive streams.
- if w := t.fc.onData(size); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
- if sendBDPPing {
- // Avoid excessive ping detection (e.g. in an L7 proxy)
- // by sending a window update prior to the BDP ping.
-
- if w := t.fc.reset(); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
-
- t.controlBuf.put(bdpPing)
- }
- // Select the right stream to dispatch.
- s := t.getStream(f)
- if s == nil {
- return
- }
- if size > 0 {
- if err := s.fc.onData(size); err != nil {
- t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
- return
- }
- if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
- }
- }
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of the next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
- }
- }
- // The server has closed the stream without sending trailers. Record that
- // the read direction is closed, and set the status appropriately.
- if f.StreamEnded() {
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
- }
-}
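-
-// Illustrative sketch (not part of the upstream file): the decoupled
-// connection-level accounting described above can be reduced to a counter
-// that replenishes the sender once enough of the window has been consumed,
-// regardless of whether the application has read the data yet. The
-// quarter-window threshold below is an assumption for illustration.
-type connLevelWindow struct {
- limit   uint32
- pending uint32
-}
-
-func (w *connLevelWindow) onData(n uint32) (update uint32) {
- w.pending += n
- if w.pending >= w.limit/4 {
-  // Hand the accumulated amount back to the sender as a window update.
-  update, w.pending = w.pending, 0
- }
- return update
-}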
-
-func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
- s := t.getStream(f)
- if s == nil {
- return
- }
- if f.ErrCode == http2.ErrCodeRefusedStream {
- // The stream was unprocessed by the server.
- s.unprocessed.Store(true)
- }
- statusCode, ok := http2ErrConvTab[f.ErrCode]
- if !ok {
- if t.logger.V(logLevel) {
- t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
- }
- statusCode = codes.Unknown
- }
- if statusCode == codes.Canceled {
- if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
- // Our deadline was already exceeded, and that was likely the cause
- // of this cancellation. Alter the status code accordingly.
- statusCode = codes.DeadlineExceeded
- }
- }
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
-}
-
-func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
- if f.IsAck() {
- return
- }
- var maxStreams *uint32
- var ss []http2.Setting
- var updateFuncs []func()
- f.ForeachSetting(func(s http2.Setting) error {
- switch s.ID {
- case http2.SettingMaxConcurrentStreams:
- maxStreams = new(uint32)
- *maxStreams = s.Val
- case http2.SettingMaxHeaderListSize:
- updateFuncs = append(updateFuncs, func() {
- t.maxSendHeaderListSize = new(uint32)
- *t.maxSendHeaderListSize = s.Val
- })
- default:
- ss = append(ss, s)
- }
- return nil
- })
- if isFirst && maxStreams == nil {
- maxStreams = new(uint32)
- *maxStreams = math.MaxUint32
- }
- sf := &incomingSettings{
- ss: ss,
- }
- if maxStreams != nil {
- updateStreamQuota := func() {
- delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
- t.maxConcurrentStreams = *maxStreams
- t.streamQuota += delta
- if delta > 0 && t.waitingStreams > 0 {
- close(t.streamsQuotaAvailable) // wake all of them up.
- t.streamsQuotaAvailable = make(chan struct{}, 1)
- }
- }
- updateFuncs = append(updateFuncs, updateStreamQuota)
- }
- t.controlBuf.executeAndPut(func() bool {
- for _, f := range updateFuncs {
- f()
- }
- return true
- }, sf)
-}
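-
-// Illustrative sketch (not part of the upstream file): a change to
-// MAX_CONCURRENT_STREAMS is applied as a signed delta so that quota already
-// consumed by in-flight streams is preserved, mirroring the
-// updateStreamQuota closure above. Names are hypothetical.
-func applyMaxConcurrentStreams(quota *int64, current *uint32, newMax uint32) {
- delta := int64(newMax) - int64(*current)
- *current = newMax
- *quota += delta
-}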
-
-func (t *http2Client) handlePing(f *http2.PingFrame) {
- if f.IsAck() {
- // Maybe it's a BDP ping.
- if t.bdpEst != nil {
- t.bdpEst.calculate(f.Data)
- }
- return
- }
- pingAck := &ping{ack: true}
- copy(pingAck.data[:], f.Data[:])
- t.controlBuf.put(pingAck)
-}
-
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- return nil
- }
- if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
- // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and
- // debug data equal to ASCII "too_many_pings", it should log the occurrence
- // at a log level that is enabled by default and double the configured
- // KEEPALIVE_TIME used for new connections on that channel.
- logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
- }
- id := f.LastStreamID
- if id > 0 && id%2 == 0 {
- t.mu.Unlock()
- return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
- }
- // A client can receive multiple GoAways from the server (see
- // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
- // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
- // sent after an RTT delay with the ID of the last stream the server will
- // process.
- //
- // Therefore, when we get the first GoAway we don't necessarily close any
- // streams. While in case of second GoAway we close all streams created after
- // the GoAwayId. This way streams that were in-flight while the GoAway from
- // server was being sent don't get killed.
- select {
- case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
- // If there are multiple GoAways the first one should always have an ID greater than the following ones.
- if id > t.prevGoAwayID {
- t.mu.Unlock()
- return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
- }
- default:
- t.setGoAwayReason(f)
- close(t.goAway)
- defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
- // Notify the clientconn about the GOAWAY before we set the state to
- // draining, to allow the client to stop attempting to create streams
- // before disallowing new streams on this connection.
- if t.state != draining {
- t.onClose(t.goAwayReason)
- t.state = draining
- }
- }
- // All streams with IDs greater than the current GoAway ID and no greater
- // than the previous GoAway ID should be killed.
- upperLimit := t.prevGoAwayID
- if upperLimit == 0 { // This is the first GoAway Frame.
- upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
- }
-
- t.prevGoAwayID = id
- if len(t.activeStreams) == 0 {
- t.mu.Unlock()
- return connectionErrorf(true, nil, "received goaway and there are no active streams")
- }
-
- streamsToClose := make([]*ClientStream, 0)
- for streamID, stream := range t.activeStreams {
- if streamID > id && streamID <= upperLimit {
- // The stream was unprocessed by the server.
- stream.unprocessed.Store(true)
- streamsToClose = append(streamsToClose, stream)
- }
- }
- t.mu.Unlock()
- // Called outside t.mu because closeStream can take controlBuf's mu, which
- // could induce deadlock and is not allowed.
- for _, stream := range streamsToClose {
- t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
- }
- return nil
-}
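-
-// Illustrative sketch (not part of the upstream file): whether a stream is
-// killed by a GOAWAY follows from the half-open interval used above. On the
-// first GOAWAY prevGoAwayID is zero, so every stream above the advertised ID
-// is treated as unprocessed.
-func killedByGoAway(streamID, goAwayID, prevGoAwayID uint32) bool {
- upper := prevGoAwayID
- if upper == 0 {
-  upper = math.MaxUint32
- }
- return streamID > goAwayID && streamID <= upper
-}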
-
-// setGoAwayReason sets the value of t.goAwayReason based
-// on the GoAway frame received.
-// It expects a lock on transport's mutex to be held by
-// the caller.
-func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
- t.goAwayReason = GoAwayNoReason
- switch f.ErrCode {
- case http2.ErrCodeEnhanceYourCalm:
- if string(f.DebugData()) == "too_many_pings" {
- t.goAwayReason = GoAwayTooManyPings
- }
- }
- if len(f.DebugData()) == 0 {
- t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
- } else {
- t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
- }
-}
-
-func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
- t.mu.Lock()
- defer t.mu.Unlock()
- return t.goAwayReason, t.goAwayDebugMessage
-}
-
-func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- t.controlBuf.put(&incomingWindowUpdate{
- streamID: f.Header().StreamID,
- increment: f.Increment,
- })
-}
-
-// operateHeaders takes action on the decoded headers.
-func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
- s := t.getStream(frame)
- if s == nil {
- return
- }
- endStream := frame.StreamEnded()
- s.bytesReceived.Store(true)
- initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
-
- if !initialHeader && !endStream {
- // As specified by gRPC over HTTP/2, a HEADERS frame (and associated
- // CONTINUATION frames) can only appear at the start or end of a stream.
- // Therefore, a second HEADERS frame must have the EOS bit set.
- st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
- t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
- return
- }
-
- // frame.Truncated is set to true when framer detects that the current header
- // list size hits MaxHeaderListSize limit.
- if frame.Truncated {
- se := status.New(codes.Internal, "peer header list size exceeded limit")
- t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
- return
- }
-
- var (
- // If gRPC Response-Headers have already been received, then it means
- // that the peer is speaking gRPC and we are in gRPC mode.
- isGRPC = !initialHeader
- mdata = make(map[string][]string)
- contentTypeErr = "malformed header: missing HTTP content-type"
- grpcMessage string
- recvCompress string
- httpStatusCode *int
- httpStatusErr string
- rawStatusCode = codes.Unknown
- // headerError is set if an error is encountered while parsing the headers
- headerError string
- )
-
- if initialHeader {
- httpStatusErr = "malformed header: missing HTTP status"
- }
-
- for _, hf := range frame.Fields {
- switch hf.Name {
- case "content-type":
- if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
- contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
- break
- }
- contentTypeErr = ""
- mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
- isGRPC = true
- case "grpc-encoding":
- recvCompress = hf.Value
- case "grpc-status":
- code, err := strconv.ParseInt(hf.Value, 10, 32)
- if err != nil {
- se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
- t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
- return
- }
- rawStatusCode = codes.Code(uint32(code))
- case "grpc-message":
- grpcMessage = decodeGrpcMessage(hf.Value)
- case ":status":
- if hf.Value == "200" {
- httpStatusErr = ""
- statusCode := 200
- httpStatusCode = &statusCode
- break
- }
-
- c, err := strconv.ParseInt(hf.Value, 10, 32)
- if err != nil {
- se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
- t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
- return
- }
- statusCode := int(c)
- httpStatusCode = &statusCode
-
- httpStatusErr = fmt.Sprintf(
- "unexpected HTTP status code received from server: %d (%s)",
- statusCode,
- http.StatusText(statusCode),
- )
- default:
- if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
- break
- }
- v, err := decodeMetadataHeader(hf.Name, hf.Value)
- if err != nil {
- headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
- logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
- break
- }
- mdata[hf.Name] = append(mdata[hf.Name], v)
- }
- }
-
- if !isGRPC || httpStatusErr != "" {
- var code = codes.Internal // when header does not include HTTP status, return INTERNAL
-
- if httpStatusCode != nil {
- var ok bool
- code, ok = HTTPStatusConvTab[*httpStatusCode]
- if !ok {
- code = codes.Unknown
- }
- }
- var errs []string
- if httpStatusErr != "" {
- errs = append(errs, httpStatusErr)
- }
- if contentTypeErr != "" {
- errs = append(errs, contentTypeErr)
- }
- // Verify the HTTP response is a 200.
- se := status.New(code, strings.Join(errs, "; "))
- t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
- return
- }
-
- if headerError != "" {
- se := status.New(codes.Internal, headerError)
- t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
- return
- }
-
- // For headers, set them in s.header and close headerChan. For trailers or
- // trailers-only, closeStream will set the trailers and close headerChan as
- // needed.
- if !endStream {
- // If headerChan hasn't been closed yet (expected, given we checked it
- // above, but something else could have potentially closed the whole
- // stream).
- if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
- s.headerValid = true
- // These values can be set without any synchronization because
- // stream goroutine will read it only after seeing a closed
- // headerChan which we'll close after setting this.
- s.recvCompress = recvCompress
- if len(mdata) > 0 {
- s.header = mdata
- }
- close(s.headerChan)
- }
- }
-
- for _, sh := range t.statsHandlers {
- if !endStream {
- inHeader := &stats.InHeader{
- Client: true,
- WireLength: int(frame.Header().Length),
- Header: metadata.MD(mdata).Copy(),
- Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
- } else {
- inTrailer := &stats.InTrailer{
- Client: true,
- WireLength: int(frame.Header().Length),
- Trailer: metadata.MD(mdata).Copy(),
- }
- sh.HandleRPC(s.ctx, inTrailer)
- }
- }
-
- if !endStream {
- return
- }
-
- status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
-
- // If client received END_STREAM from server while stream was still active,
- // send RST_STREAM.
- rstStream := s.getState() == streamActive
- t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
-}
-
-// readServerPreface reads and handles the initial settings frame from the
-// server.
-func (t *http2Client) readServerPreface() error {
- frame, err := t.framer.fr.ReadFrame()
- if err != nil {
- return connectionErrorf(true, err, "error reading server preface: %v", err)
- }
- sf, ok := frame.(*http2.SettingsFrame)
- if !ok {
- return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
- }
- t.handleSettings(sf, true)
- return nil
-}
-
-// reader verifies the server preface and reads all subsequent data from the
-// network connection. If the server preface is not read successfully, an
-// error is pushed to errCh; otherwise errCh is closed with no error.
-func (t *http2Client) reader(errCh chan<- error) {
- var errClose error
- defer func() {
- close(t.readerDone)
- if errClose != nil {
- t.Close(errClose)
- }
- }()
-
- if err := t.readServerPreface(); err != nil {
- errCh <- err
- return
- }
- close(errCh)
- if t.keepaliveEnabled {
- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
- }
-
- // loop to keep reading incoming messages on this transport.
- for {
- t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
- if t.keepaliveEnabled {
- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
- }
- if err != nil {
- // Abort an active stream if the http2.Framer returns an
- // http2.StreamError. This can happen only if the server's response
- // is malformed http2.
- if se, ok := err.(http2.StreamError); ok {
- t.mu.Lock()
- s := t.activeStreams[se.StreamID]
- t.mu.Unlock()
- if s != nil {
- // use error detail to provide better err message
- code := http2ErrConvTab[se.Code]
- errorDetail := t.framer.fr.ErrorDetail()
- var msg string
- if errorDetail != nil {
- msg = errorDetail.Error()
- } else {
- msg = "received invalid frame"
- }
- t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
- }
- continue
- }
- // Transport error.
- errClose = connectionErrorf(true, err, "error reading from server: %v", err)
- return
- }
- switch frame := frame.(type) {
- case *http2.MetaHeadersFrame:
- t.operateHeaders(frame)
- case *http2.DataFrame:
- t.handleData(frame)
- case *http2.RSTStreamFrame:
- t.handleRSTStream(frame)
- case *http2.SettingsFrame:
- t.handleSettings(frame, false)
- case *http2.PingFrame:
- t.handlePing(frame)
- case *http2.GoAwayFrame:
- errClose = t.handleGoAway(frame)
- case *http2.WindowUpdateFrame:
- t.handleWindowUpdate(frame)
- default:
- if logger.V(logLevel) {
- logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
- }
- }
- }
-}
-
-// keepalive runs in a separate goroutine and makes sure the connection is
-// alive by sending pings.
-func (t *http2Client) keepalive() {
- var err error
- defer func() {
- close(t.keepaliveDone)
- if err != nil {
- t.Close(err)
- }
- }()
- p := &ping{data: [8]byte{}}
- // True iff a ping has been sent, and no data has been received since then.
- outstandingPing := false
- // Amount of time remaining within which we should receive an ACK for the
- // last sent ping.
- timeoutLeft := time.Duration(0)
- // Records the last value of t.lastRead before we block on the timer.
- // This is required to check for read activity since then.
- prevNano := time.Now().UnixNano()
- timer := time.NewTimer(t.kp.Time)
- for {
- select {
- case <-timer.C:
- lastRead := atomic.LoadInt64(&t.lastRead)
- if lastRead > prevNano {
- // There has been read activity since the last time we were here.
- outstandingPing = false
- // The next timer should fire kp.Time after the lastRead time.
- timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
- prevNano = lastRead
- continue
- }
- if outstandingPing && timeoutLeft <= 0 {
- err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
- return
- }
- t.mu.Lock()
- if t.state == closing {
- // If the transport is closing, we should exit from the
- // keepalive goroutine here. If not, we could have a race
- // between the call to Signal() from Close() and the call to
- // Wait() here, whereby the keepalive goroutine ends up
- // blocking on the condition variable which will never be
- // signalled again.
- t.mu.Unlock()
- return
- }
- if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
- // If a ping was sent out previously (because there were active
- // streams at that point) which wasn't acked and its timeout
- // hadn't fired, but we got here and are about to go dormant,
- // we should make sure that we unconditionally send a ping once
- // we awaken.
- outstandingPing = false
- t.kpDormant = true
- t.kpDormancyCond.Wait()
- }
- t.kpDormant = false
- t.mu.Unlock()
-
- // We get here either because we were dormant and a new stream was
- // created which unblocked the Wait() call, or because the
- // keepalive timer expired. In both cases, we need to send a ping.
- if !outstandingPing {
- if channelz.IsOn() {
- t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
- }
- t.controlBuf.put(p)
- timeoutLeft = t.kp.Timeout
- outstandingPing = true
- }
- // The amount of time to sleep here is the minimum of kp.Time and
- // timeoutLeft. This will ensure that we wait only for kp.Time
- // before sending out the next ping (for cases where the ping is
- // acked).
- sleepDuration := min(t.kp.Time, timeoutLeft)
- timeoutLeft -= sleepDuration
- timer.Reset(sleepDuration)
- case <-t.ctx.Done():
- if !timer.Stop() {
- <-timer.C
- }
- return
- }
- }
-}
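-
-// Illustrative sketch (not part of the upstream file) of the two timer
-// computations in keepalive above: after read activity the next ping is due
-// kp.Time after the last read (not after now), and while a ping is
-// outstanding the sleep is capped by the remaining ACK timeout. Names are
-// hypothetical.
-func keepaliveDelays(lastRead, now int64, kpTime, timeoutLeft time.Duration) (afterActivity, bounded time.Duration) {
- afterActivity = time.Duration(lastRead) + kpTime - time.Duration(now)
- bounded = min(kpTime, timeoutLeft)
- return afterActivity, bounded
-}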
-
-func (t *http2Client) Error() <-chan struct{} {
- return t.ctx.Done()
-}
-
-func (t *http2Client) GoAway() <-chan struct{} {
- return t.goAway
-}
-
-func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
- return &channelz.EphemeralSocketMetrics{
- LocalFlowControlWindow: int64(t.fc.getSize()),
- RemoteFlowControlWindow: t.getOutFlowWindow(),
- }
-}
-
-func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
-
-func (t *http2Client) incrMsgSent() {
- if channelz.IsOn() {
- t.channelz.SocketMetrics.MessagesSent.Add(1)
- t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
- }
-}
-
-func (t *http2Client) incrMsgRecv() {
- if channelz.IsOn() {
- t.channelz.SocketMetrics.MessagesReceived.Add(1)
- t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
- }
-}
-
-func (t *http2Client) getOutFlowWindow() int64 {
- resp := make(chan uint32, 1)
- timer := time.NewTimer(time.Second)
- defer timer.Stop()
- t.controlBuf.put(&outFlowControlSizeRequest{resp})
- select {
- case sz := <-resp:
- return int64(sz)
- case <-t.ctxDone:
- return -1
- case <-timer.C:
- return -2
- }
-}
-
-func (t *http2Client) stateForTesting() transportState {
- t.mu.Lock()
- defer t.mu.Unlock()
- return t.state
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
deleted file mode 100644
index 997b0a59b..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ /dev/null
@@ -1,1476 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "math"
- rand "math/rand/v2"
- "net"
- "net/http"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/grpcutil"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/internal/syscall"
- "google.golang.org/grpc/mem"
- "google.golang.org/protobuf/proto"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
- "google.golang.org/grpc/tap"
-)
-
-var (
- // ErrIllegalHeaderWrite indicates that setting header is illegal because of
- // the stream's state.
- ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
- // ErrHeaderListSizeLimitViolation indicates that the header list size is larger
- // than the limit set by peer.
- ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
-)
-
-// serverConnectionCounter counts the number of connections a server has seen
-// (equal to the number of http2Servers created). Must be accessed atomically.
-var serverConnectionCounter uint64
-
-// http2Server implements the ServerTransport interface with HTTP2.
-type http2Server struct {
- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
- done chan struct{}
- conn net.Conn
- loopy *loopyWriter
- readerDone chan struct{} // sync point to enable testing.
- loopyWriterDone chan struct{}
- peer peer.Peer
- inTapHandle tap.ServerInHandle
- framer *framer
- // The max number of concurrent streams.
- maxStreams uint32
- // controlBuf delivers all the control related tasks (e.g., window
- // updates, reset streams, and various settings) to the controller.
- controlBuf *controlBuffer
- fc *trInFlow
- stats []stats.Handler
- // Keepalive and max-age parameters for the server.
- kp keepalive.ServerParameters
- // Keepalive enforcement policy.
- kep keepalive.EnforcementPolicy
- // The time instant the last ping was received.
- lastPingAt time.Time
- // Number of times the client has violated keepalive ping policy so far.
- pingStrikes uint8
- // Flag to signify that the number of ping strikes should be reset to 0.
- // This is set whenever data or header frames are sent.
- // 1 means yes.
- resetPingStrikes uint32 // Accessed atomically.
- initialWindowSize int32
- bdpEst *bdpEstimator
- maxSendHeaderListSize *uint32
-
- mu sync.Mutex // guard the following
-
- // drainEvent is initialized when Drain() is called for the first time,
- // after which the server writes out the first GoAway frame (with ID
- // 2^31-1). An independent goroutine is then launched to later send the
- // second GoAway. During this time we don't want to write another first
- // GoAway frame (with ID 2^31-1), so a call to Drain() is a no-op if
- // drainEvent is already initialized, since draining is already underway.
- drainEvent *grpcsync.Event
- state transportState
- activeStreams map[uint32]*ServerStream
- // idle is the time instant when the connection went idle.
- // This is either the beginning of the connection or when the number of
- // RPCs goes down to 0.
- // When the connection is busy, this value is set to the zero time.
- idle time.Time
-
- // Fields below are for channelz metric collection.
- channelz *channelz.Socket
- bufferPool mem.BufferPool
-
- connectionID uint64
-
- // maxStreamMu guards the maximum stream ID
- // This lock may not be taken if mu is already held.
- maxStreamMu sync.Mutex
- maxStreamID uint32 // max stream ID ever seen
-
- logger *grpclog.PrefixLogger
-}
-
-// NewServerTransport creates an http2 transport with conn and configuration
-// options from config.
-//
-// It returns a non-nil transport and a nil error on success. On failure, it
-// returns a nil transport and a non-nil error. For a special case where the
-// underlying conn gets closed before the client preface could be read, it
-// returns a nil transport and a nil error.
-func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
- var authInfo credentials.AuthInfo
- rawConn := conn
- if config.Credentials != nil {
- var err error
- conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
- if err != nil {
- // ErrConnDispatched means that the connection was dispatched away
- // from gRPC; those connections should be left open. io.EOF means
- // the connection was closed before handshaking completed, which can
- // happen naturally from probers. Return these errors directly.
- if err == credentials.ErrConnDispatched || err == io.EOF {
- return nil, err
- }
- return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
- }
- }
- writeBufSize := config.WriteBufferSize
- readBufSize := config.ReadBufferSize
- maxHeaderListSize := defaultServerMaxHeaderListSize
- if config.MaxHeaderListSize != nil {
- maxHeaderListSize = *config.MaxHeaderListSize
- }
- framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
- // Send initial settings as connection preface to client.
- isettings := []http2.Setting{{
- ID: http2.SettingMaxFrameSize,
- Val: http2MaxFrameLen,
- }}
- if config.MaxStreams != math.MaxUint32 {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingMaxConcurrentStreams,
- Val: config.MaxStreams,
- })
- }
- dynamicWindow := true
- iwz := int32(initialWindowSize)
- if config.InitialWindowSize >= defaultWindowSize {
- iwz = config.InitialWindowSize
- dynamicWindow = false
- }
- icwz := int32(initialWindowSize)
- if config.InitialConnWindowSize >= defaultWindowSize {
- icwz = config.InitialConnWindowSize
- dynamicWindow = false
- }
- if iwz != defaultWindowSize {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingInitialWindowSize,
- Val: uint32(iwz)})
- }
- if config.MaxHeaderListSize != nil {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingMaxHeaderListSize,
- Val: *config.MaxHeaderListSize,
- })
- }
- if config.HeaderTableSize != nil {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingHeaderTableSize,
- Val: *config.HeaderTableSize,
- })
- }
- if err := framer.fr.WriteSettings(isettings...); err != nil {
- return nil, connectionErrorf(false, err, "transport: %v", err)
- }
- // Adjust the connection flow control window if needed.
- if delta := uint32(icwz - defaultWindowSize); delta > 0 {
- if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
- return nil, connectionErrorf(false, err, "transport: %v", err)
- }
- }
- kp := config.KeepaliveParams
- if kp.MaxConnectionIdle == 0 {
- kp.MaxConnectionIdle = defaultMaxConnectionIdle
- }
- if kp.MaxConnectionAge == 0 {
- kp.MaxConnectionAge = defaultMaxConnectionAge
- }
- // Add jitter to MaxConnectionAge.
- kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
- if kp.MaxConnectionAgeGrace == 0 {
- kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
- }
- if kp.Time == 0 {
- kp.Time = defaultServerKeepaliveTime
- }
- if kp.Timeout == 0 {
- kp.Timeout = defaultServerKeepaliveTimeout
- }
- if kp.Time != infinity {
- if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
- return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
- }
- }
- kep := config.KeepalivePolicy
- if kep.MinTime == 0 {
- kep.MinTime = defaultKeepalivePolicyMinTime
- }
-
- done := make(chan struct{})
- peer := peer.Peer{
- Addr: conn.RemoteAddr(),
- LocalAddr: conn.LocalAddr(),
- AuthInfo: authInfo,
- }
- t := &http2Server{
- done: done,
- conn: conn,
- peer: peer,
- framer: framer,
- readerDone: make(chan struct{}),
- loopyWriterDone: make(chan struct{}),
- maxStreams: config.MaxStreams,
- inTapHandle: config.InTapHandle,
- fc: &trInFlow{limit: uint32(icwz)},
- state: reachable,
- activeStreams: make(map[uint32]*ServerStream),
- stats: config.StatsHandlers,
- kp: kp,
- idle: time.Now(),
- kep: kep,
- initialWindowSize: iwz,
- bufferPool: config.BufferPool,
- }
- var czSecurity credentials.ChannelzSecurityValue
- if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
- czSecurity = au.GetSecurityValue()
- }
- t.channelz = channelz.RegisterSocket(
- &channelz.Socket{
- SocketType: channelz.SocketTypeNormal,
- Parent: config.ChannelzParent,
- SocketMetrics: channelz.SocketMetrics{},
- EphemeralMetrics: t.socketMetrics,
- LocalAddr: t.peer.LocalAddr,
- RemoteAddr: t.peer.Addr,
- SocketOptions: channelz.GetSocketOption(t.conn),
- Security: czSecurity,
- },
- )
- t.logger = prefixLoggerForServerTransport(t)
-
- t.controlBuf = newControlBuffer(t.done)
- if dynamicWindow {
- t.bdpEst = &bdpEstimator{
- bdp: initialWindowSize,
- updateFlowControl: t.updateFlowControl,
- }
- }
-
- t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
- t.framer.writer.Flush()
-
- defer func() {
- if err != nil {
- t.Close(err)
- }
- }()
-
- // Check the validity of client preface.
- preface := make([]byte, len(clientPreface))
- if _, err := io.ReadFull(t.conn, preface); err != nil {
- // In deployments where a gRPC server runs behind a cloud load balancer
- // which performs regular TCP level health checks, the connection is
- // closed immediately by the latter. Returning io.EOF here allows the
- // grpc server implementation to recognize this scenario and suppress
- // logging to reduce spam.
- if err == io.EOF {
- return nil, io.EOF
- }
- return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
- }
- if !bytes.Equal(preface, clientPreface) {
- return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
- }
-
- frame, err := t.framer.fr.ReadFrame()
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- return nil, err
- }
- if err != nil {
- return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
- }
- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
- sf, ok := frame.(*http2.SettingsFrame)
- if !ok {
- return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
- }
- t.handleSettings(sf)
-
- go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
- err := t.loopy.run()
- close(t.loopyWriterDone)
- if !isIOError(err) {
- // Close the connection if a non-I/O error occurs (for I/O errors
- // the reader will also encounter the error and close). Wait 1
- // second before closing the connection, or when the reader is done
- // (i.e. the client already closed the connection or a connection
- // error occurred). This avoids the potential problem where there
- // is unread data on the receive side of the connection, which, if
- // closed, would lead to a TCP RST instead of FIN, and the client
- // encountering errors. For more info:
- // https://github.com/grpc/grpc-go/issues/5358
- timer := time.NewTimer(time.Second)
- defer timer.Stop()
- select {
- case <-t.readerDone:
- case <-timer.C:
- }
- t.conn.Close()
- }
- }()
- go t.keepalive()
- return t, nil
-}
-
-// operateHeaders takes action on the decoded headers. It returns an error if
-// a fatal error is encountered and the transport needs to close; otherwise it
-// returns nil.
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error {
- // Acquire the max stream ID lock for the entire duration.
- t.maxStreamMu.Lock()
- defer t.maxStreamMu.Unlock()
-
- streamID := frame.Header().StreamID
-
- // frame.Truncated is set to true when framer detects that the current header
- // list size hits MaxHeaderListSize limit.
- if frame.Truncated {
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: http2.ErrCodeFrameSize,
- onWrite: func() {},
- })
- return nil
- }
-
- if streamID%2 != 1 || streamID <= t.maxStreamID {
- // illegal gRPC stream id.
- return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
- }
- t.maxStreamID = streamID
-
- buf := newRecvBuffer()
- s := &ServerStream{
- Stream: &Stream{
- id: streamID,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
- },
- st: t,
- headerWireLength: int(frame.Header().Length),
- }
- var (
- // if false, content-type was missing or invalid
- isGRPC = false
- contentType = ""
- mdata = make(metadata.MD, len(frame.Fields))
- httpMethod string
- // these are set if an error is encountered while parsing the headers
- protocolError bool
- headerError *status.Status
-
- timeoutSet bool
- timeout time.Duration
- )
-
- for _, hf := range frame.Fields {
- switch hf.Name {
- case "content-type":
- contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
- if !validContentType {
- contentType = hf.Value
- break
- }
- mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
- s.contentSubtype = contentSubtype
- isGRPC = true
-
- case "grpc-accept-encoding":
- mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
- if hf.Value == "" {
- continue
- }
- compressors := hf.Value
- if s.clientAdvertisedCompressors != "" {
- compressors = s.clientAdvertisedCompressors + "," + compressors
- }
- s.clientAdvertisedCompressors = compressors
- case "grpc-encoding":
- s.recvCompress = hf.Value
- case ":method":
- httpMethod = hf.Value
- case ":path":
- s.method = hf.Value
- case "grpc-timeout":
- timeoutSet = true
- var err error
- if timeout, err = decodeTimeout(hf.Value); err != nil {
- headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
- }
- // "Transports must consider requests containing the Connection header
- // as malformed." - A41
- case "connection":
- if t.logger.V(logLevel) {
- t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
- }
- protocolError = true
- default:
- if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
- break
- }
- v, err := decodeMetadataHeader(hf.Name, hf.Value)
- if err != nil {
- headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
- t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
- break
- }
- mdata[hf.Name] = append(mdata[hf.Name], v)
- }
- }
-
- // "If multiple Host headers or multiple :authority headers are present, the
- // request must be rejected with an HTTP status code 400 as required by Host
- // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
- // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2
- // error, this takes precedence over a client not speaking gRPC.
- if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
- errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
- if t.logger.V(logLevel) {
- t.logger.Infof("Aborting the stream early: %v", errMsg)
- }
- t.controlBuf.put(&earlyAbortStream{
- httpStatus: http.StatusBadRequest,
- streamID: streamID,
- contentSubtype: s.contentSubtype,
- status: status.New(codes.Internal, errMsg),
- rst: !frame.StreamEnded(),
- })
- return nil
- }
-
- if protocolError {
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: http2.ErrCodeProtocol,
- onWrite: func() {},
- })
- return nil
- }
- if !isGRPC {
- t.controlBuf.put(&earlyAbortStream{
- httpStatus: http.StatusUnsupportedMediaType,
- streamID: streamID,
- contentSubtype: s.contentSubtype,
- status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
- rst: !frame.StreamEnded(),
- })
- return nil
- }
- if headerError != nil {
- t.controlBuf.put(&earlyAbortStream{
- httpStatus: http.StatusBadRequest,
- streamID: streamID,
- contentSubtype: s.contentSubtype,
- status: headerError,
- rst: !frame.StreamEnded(),
- })
- return nil
- }
-
- // "If :authority is missing, Host must be renamed to :authority." - A41
- if len(mdata[":authority"]) == 0 {
- // No-op if host isn't present either; an RPC that ends up with no
- // :authority header is still valid.
- if host, ok := mdata["host"]; ok {
- mdata[":authority"] = host
- delete(mdata, "host")
- }
- } else {
- // "If :authority is present, Host must be discarded" - A41
- delete(mdata, "host")
- }
-
- if frame.StreamEnded() {
- // s is just created by the caller. No lock needed.
- s.state = streamReadDone
- }
- if timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
- } else {
- s.ctx, s.cancel = context.WithCancel(ctx)
- }
-
- // Attach the received metadata to the context.
- if len(mdata) > 0 {
- s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
- }
- t.mu.Lock()
- if t.state != reachable {
- t.mu.Unlock()
- s.cancel()
- return nil
- }
- if uint32(len(t.activeStreams)) >= t.maxStreams {
- t.mu.Unlock()
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- s.cancel()
- return nil
- }
- if httpMethod != http.MethodPost {
- t.mu.Unlock()
- errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
- if t.logger.V(logLevel) {
- t.logger.Infof("Aborting the stream early: %v", errMsg)
- }
- t.controlBuf.put(&earlyAbortStream{
- httpStatus: http.StatusMethodNotAllowed,
- streamID: streamID,
- contentSubtype: s.contentSubtype,
- status: status.New(codes.Internal, errMsg),
- rst: !frame.StreamEnded(),
- })
- s.cancel()
- return nil
- }
- if t.inTapHandle != nil {
- var err error
- if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
- t.mu.Unlock()
- if t.logger.V(logLevel) {
- t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
- }
- stat, ok := status.FromError(err)
- if !ok {
- stat = status.New(codes.PermissionDenied, err.Error())
- }
- t.controlBuf.put(&earlyAbortStream{
- httpStatus: http.StatusOK,
- streamID: s.id,
- contentSubtype: s.contentSubtype,
- status: stat,
- rst: !frame.StreamEnded(),
- })
- return nil
- }
- }
- t.activeStreams[streamID] = s
- if len(t.activeStreams) == 1 {
- t.idle = time.Time{}
- }
- t.mu.Unlock()
- if channelz.IsOn() {
- t.channelz.SocketMetrics.StreamsStarted.Add(1)
- t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
- }
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
- s.ctxDone = s.ctx.Done()
- s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
- },
- }
- // Register the stream with loopy.
- t.controlBuf.put(®isterStream{
- streamID: s.id,
- wq: s.wq,
- })
- handle(s)
- return nil
-}
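-
-// Illustrative sketch (not part of the upstream file) of the A41
-// :authority/Host normalization performed above: Host is promoted to
-// :authority only when :authority is absent, and is discarded otherwise.
-func normalizeAuthority(md map[string][]string) {
- if len(md[":authority"]) == 0 {
-  if host, ok := md["host"]; ok {
-   md[":authority"] = host
-  }
- }
- delete(md, "host")
-}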
-
-// HandleStreams receives incoming streams using the given handler. This is
-// typically run in a separate goroutine.
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) {
- defer func() {
- close(t.readerDone)
- <-t.loopyWriterDone
- }()
- for {
- t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
- if err != nil {
- if se, ok := err.(http2.StreamError); ok {
- if t.logger.V(logLevel) {
- t.logger.Warningf("Encountered http2.StreamError: %v", se)
- }
- t.mu.Lock()
- s := t.activeStreams[se.StreamID]
- t.mu.Unlock()
- if s != nil {
- t.closeStream(s, true, se.Code, false)
- } else {
- t.controlBuf.put(&cleanupStream{
- streamID: se.StreamID,
- rst: true,
- rstCode: se.Code,
- onWrite: func() {},
- })
- }
- continue
- }
- t.Close(err)
- return
- }
- switch frame := frame.(type) {
- case *http2.MetaHeadersFrame:
- if err := t.operateHeaders(ctx, frame, handle); err != nil {
- // Any error processing client headers, e.g. invalid stream ID,
- // is considered a protocol violation.
- t.controlBuf.put(&goAway{
- code: http2.ErrCodeProtocol,
- debugData: []byte(err.Error()),
- closeConn: err,
- })
- continue
- }
- case *http2.DataFrame:
- t.handleData(frame)
- case *http2.RSTStreamFrame:
- t.handleRSTStream(frame)
- case *http2.SettingsFrame:
- t.handleSettings(frame)
- case *http2.PingFrame:
- t.handlePing(frame)
- case *http2.WindowUpdateFrame:
- t.handleWindowUpdate(frame)
- case *http2.GoAwayFrame:
- // TODO: Handle GoAway from the client appropriately.
- default:
- if t.logger.V(logLevel) {
- t.logger.Infof("Received unsupported frame type %T", frame)
- }
- }
- }
-}
-
-func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.activeStreams == nil {
- // The transport is closing.
- return nil, false
- }
- s, ok := t.activeStreams[f.Header().StreamID]
- if !ok {
- // The stream is already done.
- return nil, false
- }
- return s, true
-}
-
-// adjustWindow sends out extra window update over the initial window size
-// of stream if the application is requesting data larger in size than
-// the window.
-func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
- if w := s.fc.maybeAdjust(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
- }
-}
-
-// updateWindow adjusts the inbound quota for the stream and the transport.
-// Window updates will deliver to the controller for sending when
-// the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *ServerStream, n uint32) {
- if w := s.fc.onRead(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
- increment: w,
- })
- }
-}
-
-// updateFlowControl updates the incoming flow control windows
-// for the transport and the stream based on the current bdp
-// estimation.
-func (t *http2Server) updateFlowControl(n uint32) {
- t.mu.Lock()
- for _, s := range t.activeStreams {
- s.fc.newLimit(n)
- }
- t.initialWindowSize = int32(n)
- t.mu.Unlock()
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: t.fc.newLimit(n),
- })
- t.controlBuf.put(&outgoingSettings{
- ss: []http2.Setting{
- {
- ID: http2.SettingInitialWindowSize,
- Val: n,
- },
- },
- })
-
-
-func (t *http2Server) handleData(f *http2.DataFrame) {
- size := f.Header().Length
- var sendBDPPing bool
- if t.bdpEst != nil {
- sendBDPPing = t.bdpEst.add(size)
- }
- // Decouple connection's flow control from application's read.
- // An update on connection's flow control should not depend on
- // whether user application has read the data or not. Such a
- // restriction is already imposed on the stream's flow control,
- // and therefore the sender will be blocked anyways.
- // Decoupling the connection flow control will prevent other
- // active(fast) streams from starving in presence of slow or
- // inactive streams.
- if w := t.fc.onData(size); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
- if sendBDPPing {
- // Avoid excessive ping detection (e.g. in an L7 proxy)
- // by sending a window update prior to the BDP ping.
- if w := t.fc.reset(); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
- t.controlBuf.put(bdpPing)
- }
- // Select the right stream to dispatch.
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- if s.getState() == streamReadDone {
- t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
- return
- }
- if size > 0 {
- if err := s.fc.onData(size); err != nil {
- t.closeStream(s, true, http2.ErrCodeFlowControl, false)
- return
- }
- if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
- }
- }
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
-		// guarantee f.Data() is consumed before the arrival of the next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
- }
- }
- if f.StreamEnded() {
- // Received the end of stream from the client.
- s.compareAndSwapState(streamActive, streamReadDone)
- s.write(recvMsg{err: io.EOF})
- }
-}
-
-func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
- // If the stream is not deleted from the transport's active streams map, then do a regular close stream.
- if s, ok := t.getStream(f); ok {
- t.closeStream(s, false, 0, false)
- return
- }
- // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map.
- t.controlBuf.put(&cleanupStream{
- streamID: f.Header().StreamID,
- rst: false,
- rstCode: 0,
- onWrite: func() {},
- })
-}
-
-func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
- if f.IsAck() {
- return
- }
- var ss []http2.Setting
- var updateFuncs []func()
- f.ForeachSetting(func(s http2.Setting) error {
- switch s.ID {
- case http2.SettingMaxHeaderListSize:
- updateFuncs = append(updateFuncs, func() {
- t.maxSendHeaderListSize = new(uint32)
- *t.maxSendHeaderListSize = s.Val
- })
- default:
- ss = append(ss, s)
- }
- return nil
- })
- t.controlBuf.executeAndPut(func() bool {
- for _, f := range updateFuncs {
- f()
- }
- return true
- }, &incomingSettings{
- ss: ss,
- })
-}
-
-const (
- maxPingStrikes = 2
- defaultPingTimeout = 2 * time.Hour
-)
-
-func (t *http2Server) handlePing(f *http2.PingFrame) {
- if f.IsAck() {
- if f.Data == goAwayPing.data && t.drainEvent != nil {
- t.drainEvent.Fire()
- return
- }
- // Maybe it's a BDP ping.
- if t.bdpEst != nil {
- t.bdpEst.calculate(f.Data)
- }
- return
- }
- pingAck := &ping{ack: true}
- copy(pingAck.data[:], f.Data[:])
- t.controlBuf.put(pingAck)
-
- now := time.Now()
- defer func() {
- t.lastPingAt = now
- }()
-	// A reset of the ping strikes means that we don't need to check for a
-	// policy violation for this ping, and that the pingStrikes counter
-	// should be set to 0.
- if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
- t.pingStrikes = 0
- return
- }
- t.mu.Lock()
- ns := len(t.activeStreams)
- t.mu.Unlock()
- if ns < 1 && !t.kep.PermitWithoutStream {
-		// Keepalive shouldn't be active; thus, this new ping should
-		// have come at least defaultPingTimeout after the last one.
- if t.lastPingAt.Add(defaultPingTimeout).After(now) {
- t.pingStrikes++
- }
- } else {
- // Check if keepalive policy is respected.
- if t.lastPingAt.Add(t.kep.MinTime).After(now) {
- t.pingStrikes++
- }
- }
-
- if t.pingStrikes > maxPingStrikes {
- // Send goaway and close the connection.
- t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
- }
-}
-
-func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- t.controlBuf.put(&incomingWindowUpdate{
- streamID: f.Header().StreamID,
- increment: f.Increment,
- })
-}
-
-func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
- for k, vv := range md {
- if isReservedHeader(k) {
-			// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- return headerFields
-}
-
-func (t *http2Server) checkForHeaderListSize(it any) bool {
- if t.maxSendHeaderListSize == nil {
- return true
- }
- hdrFrame := it.(*headerFrame)
- var sz int64
- for _, f := range hdrFrame.hf {
- if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- if t.logger.V(logLevel) {
- t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
- }
- return false
- }
- }
- return true
-}
-
-func (t *http2Server) streamContextErr(s *ServerStream) error {
- select {
- case <-t.done:
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
-}
-
-// writeHeader sends the header metadata md back to the client.
-func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
- s.hdrMu.Lock()
- defer s.hdrMu.Unlock()
- if s.getState() == streamDone {
- return t.streamContextErr(s)
- }
-
- if s.updateHeaderSent() {
- return ErrIllegalHeaderWrite
- }
-
- if md.Len() > 0 {
- if s.header.Len() > 0 {
- s.header = metadata.Join(s.header, md)
- } else {
- s.header = md
- }
- }
- if err := t.writeHeaderLocked(s); err != nil {
- switch e := err.(type) {
- case ConnectionError:
- return status.Error(codes.Unavailable, e.Desc)
- default:
- return status.Convert(err).Err()
- }
- }
- return nil
-}
-
-func (t *http2Server) setResetPingStrikes() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
-}
-
-func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
-	// TODO(mmukhi): Benchmark if the performance gets better if we count the
-	// metadata and other header fields first and create a slice of that exact size.
- headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
- headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
- if s.sendCompress != "" {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
- }
- headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
- hf := &headerFrame{
- streamID: s.id,
- hf: headerFields,
- endStream: false,
- onWrite: t.setResetPingStrikes,
- }
- success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
- if !success {
- if err != nil {
- return err
- }
- t.closeStream(s, true, http2.ErrCodeInternal, false)
- return ErrHeaderListSizeLimitViolation
- }
- for _, sh := range t.stats {
- // Note: Headers are compressed with hpack after this call returns.
- // No WireLength field is set here.
- outHeader := &stats.OutHeader{
- Header: s.header.Copy(),
- Compression: s.sendCompress,
- }
- sh.HandleRPC(s.Context(), outHeader)
- }
- return nil
-}
-
-// writeStatus sends the stream status to the client and terminates the
-// stream. No further I/O operations can be performed on this stream.
-// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if
-// early OK is adopted.
-func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
- s.hdrMu.Lock()
- defer s.hdrMu.Unlock()
-
- if s.getState() == streamDone {
- return nil
- }
-
-	// TODO(mmukhi): Benchmark if the performance gets better if we count the
-	// metadata and other header fields first and create a slice of that exact size.
- headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
- if !s.updateHeaderSent() { // No headers have been sent.
- if len(s.header) > 0 { // Send a separate header frame.
- if err := t.writeHeaderLocked(s); err != nil {
- return err
- }
- } else { // Send a trailer only response.
- headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
- }
- }
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
-
- if p := st.Proto(); p != nil && len(p.Details) > 0 {
- // Do not use the user's grpc-status-details-bin (if present) if we are
- // even attempting to set our own.
- delete(s.trailer, grpcStatusDetailsBinHeader)
- stBytes, err := proto.Marshal(p)
- if err != nil {
- // TODO: return error instead, when callers are able to handle it.
- t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
- } else {
- headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
- }
- }
-
- // Attach the trailer metadata.
- headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
- trailingHeader := &headerFrame{
- streamID: s.id,
- hf: headerFields,
- endStream: true,
- onWrite: t.setResetPingStrikes,
- }
-
- success, err := t.controlBuf.executeAndPut(func() bool {
- return t.checkForHeaderListSize(trailingHeader)
- }, nil)
- if !success {
- if err != nil {
- return err
- }
- t.closeStream(s, true, http2.ErrCodeInternal, false)
- return ErrHeaderListSizeLimitViolation
- }
- // Send a RST_STREAM after the trailers if the client has not already half-closed.
- rst := s.getState() == streamActive
- t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
- for _, sh := range t.stats {
- // Note: The trailer fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
- Trailer: s.trailer.Copy(),
- })
- }
- return nil
-}
-
-// write converts the data into an HTTP2 data frame and sends it out. A
-// non-nil error is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
- reader := data.Reader()
-
- if !s.isHeaderSent() { // Headers haven't been written yet.
- if err := t.writeHeader(s, nil); err != nil {
- _ = reader.Close()
- return err
- }
- } else {
- // Writing headers checks for this condition.
- if s.getState() == streamDone {
- _ = reader.Close()
- return t.streamContextErr(s)
- }
- }
-
- df := &dataFrame{
- streamID: s.id,
- h: hdr,
- reader: reader,
- onEachWrite: t.setResetPingStrikes,
- }
- if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
- _ = reader.Close()
- return t.streamContextErr(s)
- }
- if err := t.controlBuf.put(df); err != nil {
- _ = reader.Close()
- return err
- }
- t.incrMsgSent()
- return nil
-}
-
-// keepalive running in a separate goroutine does the following:
-// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
-// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
-// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
-// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
-// after an additional duration of keepalive.Timeout.
-func (t *http2Server) keepalive() {
- p := &ping{}
- // True iff a ping has been sent, and no data has been received since then.
- outstandingPing := false
-	// Amount of time remaining within which we should receive an ACK for
-	// the last sent ping.
- kpTimeoutLeft := time.Duration(0)
- // Records the last value of t.lastRead before we go block on the timer.
- // This is required to check for read activity since then.
- prevNano := time.Now().UnixNano()
- // Initialize the different timers to their default values.
- idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
- ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
- kpTimer := time.NewTimer(t.kp.Time)
- defer func() {
-		// We would need to drain the underlying channel in these timers after
-		// a call to Stop(), but only if we intended to reset them. Clearly we
-		// are not resetting them here.
- idleTimer.Stop()
- ageTimer.Stop()
- kpTimer.Stop()
- }()
-
- for {
- select {
- case <-idleTimer.C:
- t.mu.Lock()
- idle := t.idle
- if idle.IsZero() { // The connection is non-idle.
- t.mu.Unlock()
- idleTimer.Reset(t.kp.MaxConnectionIdle)
- continue
- }
- val := t.kp.MaxConnectionIdle - time.Since(idle)
- t.mu.Unlock()
- if val <= 0 {
- // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
- // Gracefully close the connection.
- t.Drain("max_idle")
- return
- }
- idleTimer.Reset(val)
- case <-ageTimer.C:
- t.Drain("max_age")
- ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
- select {
- case <-ageTimer.C:
- // Close the connection after grace period.
- if t.logger.V(logLevel) {
- t.logger.Infof("Closing server transport due to maximum connection age")
- }
- t.controlBuf.put(closeConnection{})
- case <-t.done:
- }
- return
- case <-kpTimer.C:
- lastRead := atomic.LoadInt64(&t.lastRead)
- if lastRead > prevNano {
-			// There has been read activity since the last time we were
-			// here. Set up the timer to fire kp.Time after the lastRead
-			// time and continue.
- outstandingPing = false
- kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
- prevNano = lastRead
- continue
- }
- if outstandingPing && kpTimeoutLeft <= 0 {
- t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
- return
- }
- if !outstandingPing {
- if channelz.IsOn() {
- t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
- }
- t.controlBuf.put(p)
- kpTimeoutLeft = t.kp.Timeout
- outstandingPing = true
- }
- // The amount of time to sleep here is the minimum of kp.Time and
- // timeoutLeft. This will ensure that we wait only for kp.Time
- // before sending out the next ping (for cases where the ping is
- // acked).
- sleepDuration := min(t.kp.Time, kpTimeoutLeft)
- kpTimeoutLeft -= sleepDuration
- kpTimer.Reset(sleepDuration)
- case <-t.done:
- return
- }
- }
-}
-
-// Close starts shutting down the http2Server transport.
-// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
-// could cause some resource issue. Revisit this later.
-func (t *http2Server) Close(err error) {
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- return
- }
- if t.logger.V(logLevel) {
- t.logger.Infof("Closing: %v", err)
- }
- t.state = closing
- streams := t.activeStreams
- t.activeStreams = nil
- t.mu.Unlock()
- t.controlBuf.finish()
- close(t.done)
- if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
- t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
- }
- channelz.RemoveEntry(t.channelz.ID)
- // Cancel all active streams.
- for _, s := range streams {
- s.cancel()
- }
-}
-
-// deleteStream deletes the stream s from transport's active streams.
-func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
-	t.mu.Lock()
- if _, ok := t.activeStreams[s.id]; ok {
- delete(t.activeStreams, s.id)
- if len(t.activeStreams) == 0 {
- t.idle = time.Now()
- }
- }
- t.mu.Unlock()
-
- if channelz.IsOn() {
- if eosReceived {
- t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
- } else {
- t.channelz.SocketMetrics.StreamsFailed.Add(1)
- }
- }
-}
-
-// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
-func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
- // In case stream sending and receiving are invoked in separate
- // goroutines (e.g., bi-directional streaming), cancel needs to be
- // called to interrupt the potential blocking on other goroutines.
- s.cancel()
-
- oldState := s.swapState(streamDone)
- if oldState == streamDone {
- // If the stream was already done, return.
- return
- }
-
- hdr.cleanup = &cleanupStream{
- streamID: s.id,
- rst: rst,
- rstCode: rstCode,
- onWrite: func() {
- t.deleteStream(s, eosReceived)
- },
- }
- t.controlBuf.put(hdr)
-}
-
-// closeStream clears the footprint of a stream when the stream is not needed any more.
-func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
- // In case stream sending and receiving are invoked in separate
- // goroutines (e.g., bi-directional streaming), cancel needs to be
- // called to interrupt the potential blocking on other goroutines.
- s.cancel()
-
- s.swapState(streamDone)
- t.deleteStream(s, eosReceived)
-
- t.controlBuf.put(&cleanupStream{
- streamID: s.id,
- rst: rst,
- rstCode: rstCode,
- onWrite: func() {},
- })
-}
-
-func (t *http2Server) Drain(debugData string) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.drainEvent != nil {
- return
- }
- t.drainEvent = grpcsync.NewEvent()
- t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
-}
-
-var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
-
-// outgoingGoAwayHandler handles an outgoing GoAway and returns true if loopy
-// needs to put itself in draining mode.
-func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
- t.maxStreamMu.Lock()
- t.mu.Lock()
- if t.state == closing { // TODO(mmukhi): This seems unnecessary.
- t.mu.Unlock()
- t.maxStreamMu.Unlock()
- // The transport is closing.
- return false, ErrConnClosing
- }
- if !g.headsUp {
- // Stop accepting more streams now.
- t.state = draining
- sid := t.maxStreamID
- retErr := g.closeConn
- if len(t.activeStreams) == 0 {
- retErr = errors.New("second GOAWAY written and no active streams left to process")
- }
- t.mu.Unlock()
- t.maxStreamMu.Unlock()
- if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
- return false, err
- }
- t.framer.writer.Flush()
- if retErr != nil {
- return false, retErr
- }
- return true, nil
- }
- t.mu.Unlock()
- t.maxStreamMu.Unlock()
-	// For a graceful close, send out a GoAway with a stream ID of MaxUint32.
-	// Follow that with a ping and wait for the ack to come back, or for a
-	// timer to expire. During this time, accept new streams since they might
-	// have originated before the GoAway reached the client.
-	// After getting the ack or timer expiration, send out another GoAway,
-	// this time with an ID of the max stream the server intends to process.
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
- return false, err
- }
- if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
- return false, err
- }
- go func() {
- timer := time.NewTimer(5 * time.Second)
- defer timer.Stop()
- select {
- case <-t.drainEvent.Done():
- case <-timer.C:
- case <-t.done:
- return
- }
- t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
- }()
- return false, nil
-}
-
-func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
- return &channelz.EphemeralSocketMetrics{
- LocalFlowControlWindow: int64(t.fc.getSize()),
- RemoteFlowControlWindow: t.getOutFlowWindow(),
- }
-}
-
-func (t *http2Server) incrMsgSent() {
- if channelz.IsOn() {
- t.channelz.SocketMetrics.MessagesSent.Add(1)
- t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
- }
-}
-
-func (t *http2Server) incrMsgRecv() {
- if channelz.IsOn() {
- t.channelz.SocketMetrics.MessagesReceived.Add(1)
- t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
- }
-}
-
-func (t *http2Server) getOutFlowWindow() int64 {
- resp := make(chan uint32, 1)
- timer := time.NewTimer(time.Second)
- defer timer.Stop()
- t.controlBuf.put(&outFlowControlSizeRequest{resp})
- select {
- case sz := <-resp:
- return int64(sz)
- case <-t.done:
- return -1
- case <-timer.C:
- return -2
- }
-}
-
-// Peer returns the peer of the transport.
-func (t *http2Server) Peer() *peer.Peer {
- return &peer.Peer{
- Addr: t.peer.Addr,
- LocalAddr: t.peer.LocalAddr,
- AuthInfo: t.peer.AuthInfo, // Can be nil
- }
-}
-
-func getJitter(v time.Duration) time.Duration {
- if v == infinity {
- return 0
- }
- // Generate a jitter between +/- 10% of the value.
- r := int64(v / 10)
- j := rand.Int64N(2*r) - r
- return time.Duration(j)
-}
-
-type connectionKey struct{}
-
-// GetConnection gets the connection from the context.
-func GetConnection(ctx context.Context) net.Conn {
- conn, _ := ctx.Value(connectionKey{}).(net.Conn)
- return conn
-}
-
-// SetConnection adds the connection to the context to be able to get
-// information about the destination ip and port for an incoming RPC. This also
-// allows any unary or streaming interceptors to see the connection.
-func SetConnection(ctx context.Context, conn net.Conn) context.Context {
- return context.WithValue(ctx, connectionKey{}, conn)
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
deleted file mode 100644
index 3613d7b64..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bufio"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "math"
- "net"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "time"
- "unicode/utf8"
-
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- "google.golang.org/grpc/codes"
-)
-
-const (
-	// http2MaxFrameLen specifies the max length of an HTTP2 frame.
- http2MaxFrameLen = 16384 // 16KB frame
- // https://httpwg.org/specs/rfc7540.html#SettingValues
- http2InitHeaderTableSize = 4096
-)
-
-var (
- clientPreface = []byte(http2.ClientPreface)
- http2ErrConvTab = map[http2.ErrCode]codes.Code{
- http2.ErrCodeNo: codes.Internal,
- http2.ErrCodeProtocol: codes.Internal,
- http2.ErrCodeInternal: codes.Internal,
- http2.ErrCodeFlowControl: codes.ResourceExhausted,
- http2.ErrCodeSettingsTimeout: codes.Internal,
- http2.ErrCodeStreamClosed: codes.Internal,
- http2.ErrCodeFrameSize: codes.Internal,
- http2.ErrCodeRefusedStream: codes.Unavailable,
- http2.ErrCodeCancel: codes.Canceled,
- http2.ErrCodeCompression: codes.Internal,
- http2.ErrCodeConnect: codes.Internal,
- http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
- http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
- http2.ErrCodeHTTP11Required: codes.Internal,
- }
- // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
- HTTPStatusConvTab = map[int]codes.Code{
- // 400 Bad Request - INTERNAL.
- http.StatusBadRequest: codes.Internal,
- // 401 Unauthorized - UNAUTHENTICATED.
- http.StatusUnauthorized: codes.Unauthenticated,
- // 403 Forbidden - PERMISSION_DENIED.
- http.StatusForbidden: codes.PermissionDenied,
- // 404 Not Found - UNIMPLEMENTED.
- http.StatusNotFound: codes.Unimplemented,
- // 429 Too Many Requests - UNAVAILABLE.
- http.StatusTooManyRequests: codes.Unavailable,
- // 502 Bad Gateway - UNAVAILABLE.
- http.StatusBadGateway: codes.Unavailable,
- // 503 Service Unavailable - UNAVAILABLE.
- http.StatusServiceUnavailable: codes.Unavailable,
- // 504 Gateway timeout - UNAVAILABLE.
- http.StatusGatewayTimeout: codes.Unavailable,
- }
-)
-
-var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
-
-// isReservedHeader checks whether hdr belongs to the HTTP2 headers
-// reserved by the gRPC protocol. Any other headers are classified as
-// user-specified metadata.
-func isReservedHeader(hdr string) bool {
- if hdr != "" && hdr[0] == ':' {
- return true
- }
- switch hdr {
- case "content-type",
- "user-agent",
- "grpc-message-type",
- "grpc-encoding",
- "grpc-message",
- "grpc-status",
- "grpc-timeout",
- // Intentionally exclude grpc-previous-rpc-attempts and
- // grpc-retry-pushback-ms, which are "reserved", but their API
- // intentionally works via metadata.
- "te":
- return true
- default:
- return false
- }
-}
-
-// isWhitelistedHeader checks whether hdr should be propagated into metadata
-// visible to users, even though it is classified as "reserved", above.
-func isWhitelistedHeader(hdr string) bool {
- switch hdr {
- case ":authority", "user-agent":
- return true
- default:
- return false
- }
-}
-
-const binHdrSuffix = "-bin"
-
-func encodeBinHeader(v []byte) string {
- return base64.RawStdEncoding.EncodeToString(v)
-}
-
-func decodeBinHeader(v string) ([]byte, error) {
- if len(v)%4 == 0 {
- // Input was padded, or padding was not necessary.
- return base64.StdEncoding.DecodeString(v)
- }
- return base64.RawStdEncoding.DecodeString(v)
-}
-
-func encodeMetadataHeader(k, v string) string {
- if strings.HasSuffix(k, binHdrSuffix) {
- return encodeBinHeader(([]byte)(v))
- }
- return v
-}
-
-func decodeMetadataHeader(k, v string) (string, error) {
- if strings.HasSuffix(k, binHdrSuffix) {
- b, err := decodeBinHeader(v)
- return string(b), err
- }
- return v, nil
-}
-
-type timeoutUnit uint8
-
-const (
- hour timeoutUnit = 'H'
- minute timeoutUnit = 'M'
- second timeoutUnit = 'S'
- millisecond timeoutUnit = 'm'
- microsecond timeoutUnit = 'u'
- nanosecond timeoutUnit = 'n'
-)
-
-func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
- switch u {
- case hour:
- return time.Hour, true
- case minute:
- return time.Minute, true
- case second:
- return time.Second, true
- case millisecond:
- return time.Millisecond, true
- case microsecond:
- return time.Microsecond, true
- case nanosecond:
- return time.Nanosecond, true
- default:
- }
- return
-}
-
-func decodeTimeout(s string) (time.Duration, error) {
- size := len(s)
- if size < 2 {
- return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
- }
- if size > 9 {
- // Spec allows for 8 digits plus the unit.
- return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
- }
- unit := timeoutUnit(s[size-1])
- d, ok := timeoutUnitToDuration(unit)
- if !ok {
- return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
- }
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
- if err != nil {
- return 0, err
- }
- const maxHours = math.MaxInt64 / int64(time.Hour)
- if d == time.Hour && t > maxHours {
- // This timeout would overflow math.MaxInt64; clamp it.
- return time.Duration(math.MaxInt64), nil
- }
- return d * time.Duration(t), nil
-}
-
-const (
- spaceByte = ' '
- tildeByte = '~'
- percentByte = '%'
-)
-
-// encodeGrpcMessage is used to encode the status message in the header
-// field "grpc-message". It does percent encoding and also replaces invalid
-// utf-8 characters with the Unicode replacement character.
-//
-// It checks whether each individual byte in msg is an allowable byte, and
-// then either percent encodes it or passes it through. When percent encoding,
-// the byte is converted into hexadecimal notation with a '%' prepended.
-func encodeGrpcMessage(msg string) string {
- if msg == "" {
- return ""
- }
- lenMsg := len(msg)
- for i := 0; i < lenMsg; i++ {
- c := msg[i]
- if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
- return encodeGrpcMessageUnchecked(msg)
- }
- }
- return msg
-}
-
-func encodeGrpcMessageUnchecked(msg string) string {
- var sb strings.Builder
- for len(msg) > 0 {
- r, size := utf8.DecodeRuneInString(msg)
- for _, b := range []byte(string(r)) {
- if size > 1 {
-				// If size > 1, r is not ASCII. Always do percent encoding.
- fmt.Fprintf(&sb, "%%%02X", b)
- continue
- }
-
- // The for loop is necessary even if size == 1. r could be
- // utf8.RuneError.
- //
- // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
- if b >= spaceByte && b <= tildeByte && b != percentByte {
- sb.WriteByte(b)
- } else {
- fmt.Fprintf(&sb, "%%%02X", b)
- }
- }
- msg = msg[size:]
- }
- return sb.String()
-}
-
-// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
-func decodeGrpcMessage(msg string) string {
- if msg == "" {
- return ""
- }
- lenMsg := len(msg)
- for i := 0; i < lenMsg; i++ {
- if msg[i] == percentByte && i+2 < lenMsg {
- return decodeGrpcMessageUnchecked(msg)
- }
- }
- return msg
-}
-
-func decodeGrpcMessageUnchecked(msg string) string {
- var sb strings.Builder
- lenMsg := len(msg)
- for i := 0; i < lenMsg; i++ {
- c := msg[i]
- if c == percentByte && i+2 < lenMsg {
- parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
- if err != nil {
- sb.WriteByte(c)
- } else {
- sb.WriteByte(byte(parsed))
- i += 2
- }
- } else {
- sb.WriteByte(c)
- }
- }
- return sb.String()
-}
-
-type bufWriter struct {
- pool *sync.Pool
- buf []byte
- offset int
- batchSize int
- conn net.Conn
- err error
-}
-
-func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
- w := &bufWriter{
- batchSize: batchSize,
- conn: conn,
- pool: pool,
- }
-	// A nil pool indicates that we should use a non-shared buffer.
- if pool == nil {
- w.buf = make([]byte, batchSize)
- }
- return w
-}
-
-func (w *bufWriter) Write(b []byte) (int, error) {
- if w.err != nil {
- return 0, w.err
- }
- if w.batchSize == 0 { // Buffer has been disabled.
- n, err := w.conn.Write(b)
- return n, toIOError(err)
- }
- if w.buf == nil {
- b := w.pool.Get().(*[]byte)
- w.buf = *b
- }
- written := 0
- for len(b) > 0 {
- copied := copy(w.buf[w.offset:], b)
- b = b[copied:]
- written += copied
- w.offset += copied
- if w.offset < w.batchSize {
- continue
- }
- if err := w.flushKeepBuffer(); err != nil {
- return written, err
- }
- }
- return written, nil
-}
-
-func (w *bufWriter) Flush() error {
- err := w.flushKeepBuffer()
-	// Only release the buffer if we are in "shared" mode.
- if w.buf != nil && w.pool != nil {
- b := w.buf
- w.pool.Put(&b)
- w.buf = nil
- }
- return err
-}
-
-func (w *bufWriter) flushKeepBuffer() error {
- if w.err != nil {
- return w.err
- }
- if w.offset == 0 {
- return nil
- }
- _, w.err = w.conn.Write(w.buf[:w.offset])
- w.err = toIOError(w.err)
- w.offset = 0
- return w.err
-}
-
-type ioError struct {
- error
-}
-
-func (i ioError) Unwrap() error {
- return i.error
-}
-
-func isIOError(err error) bool {
- return errors.As(err, &ioError{})
-}
-
-func toIOError(err error) error {
- if err == nil {
- return nil
- }
- return ioError{error: err}
-}
-
-type framer struct {
- writer *bufWriter
- fr *http2.Framer
-}
-
-var writeBufferPoolMap = make(map[int]*sync.Pool)
-var writeBufferMutex sync.Mutex
-
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
- if writeBufferSize < 0 {
- writeBufferSize = 0
- }
- var r io.Reader = conn
- if readBufferSize > 0 {
- r = bufio.NewReaderSize(r, readBufferSize)
- }
- var pool *sync.Pool
- if sharedWriteBuffer {
- pool = getWriteBufferPool(writeBufferSize)
- }
- w := newBufWriter(conn, writeBufferSize, pool)
- f := &framer{
- writer: w,
- fr: http2.NewFramer(w, r),
- }
- f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
-	// Opt in to the Frame reuse API on the framer to reduce garbage.
- // Frames aren't safe to read from after a subsequent call to ReadFrame.
- f.fr.SetReuseFrames()
- f.fr.MaxHeaderListSize = maxHeaderListSize
- f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
- return f
-}
-
-func getWriteBufferPool(size int) *sync.Pool {
- writeBufferMutex.Lock()
- defer writeBufferMutex.Unlock()
- pool, ok := writeBufferPoolMap[size]
- if ok {
- return pool
- }
- pool = &sync.Pool{
- New: func() any {
- b := make([]byte, size)
- return &b
- },
- }
- writeBufferPoolMap[size] = pool
- return pool
-}
-
-// parseDialTarget returns the network and address to pass to the dialer.
-func parseDialTarget(target string) (string, string) {
- net := "tcp"
- m1 := strings.Index(target, ":")
- m2 := strings.Index(target, ":/")
- // handle unix:addr which will fail with url.Parse
- if m1 >= 0 && m2 < 0 {
- if n := target[0:m1]; n == "unix" {
- return n, target[m1+1:]
- }
- }
- if m2 >= 0 {
- t, err := url.Parse(target)
- if err != nil {
- return net, target
- }
- scheme := t.Scheme
- addr := t.Path
- if scheme == "unix" {
- if addr == "" {
- addr = t.Host
- }
- return scheme, addr
- }
- }
- return net, target
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go
deleted file mode 100644
index 42ed2b07a..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/logging.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "fmt"
-
- "google.golang.org/grpc/grpclog"
- internalgrpclog "google.golang.org/grpc/internal/grpclog"
-)
-
-var logger = grpclog.Component("transport")
-
-func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
- return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
-}
-
-func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
- return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
-}
-
-func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
- return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
deleted file mode 100644
index c11b52782..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package networktype declares the network type to be used in the default
-// dialer. It is stored as an attribute of a resolver.Address.
-package networktype
-
-import (
- "google.golang.org/grpc/resolver"
-)
-
-// keyType is the key to use for storing State in Attributes.
-type keyType string
-
-const key = keyType("grpc.internal.transport.networktype")
-
-// Set returns a copy of the provided address with attributes containing networkType.
-func Set(address resolver.Address, networkType string) resolver.Address {
- address.Attributes = address.Attributes.WithValue(key, networkType)
- return address
-}
-
-// Get returns the network type in the resolver.Address and true, or "", false
-// if not present.
-func Get(address resolver.Address) (string, bool) {
- v := address.Attributes.Value(key)
- if v == nil {
- return "", false
- }
- return v.(string), true
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
deleted file mode 100644
index d77384595..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bufio"
- "context"
- "encoding/base64"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
-
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/proxyattributes"
- "google.golang.org/grpc/resolver"
-)
-
-const proxyAuthHeaderKey = "Proxy-Authorization"
-
-// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
-// It's possible that this reader reads more than what's needed for the response
-// and stores those bytes in the buffer. bufConn wraps the original net.Conn
-// and the bufio.Reader to make sure we don't lose the bytes in the buffer.
-type bufConn struct {
- net.Conn
- r io.Reader
-}
-
-func (c *bufConn) Read(b []byte) (int, error) {
- return c.r.Read(b)
-}
-
-func basicAuth(username, password string) string {
- auth := username + ":" + password
- return base64.StdEncoding.EncodeToString([]byte(auth))
-}
-
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) {
- defer func() {
- if err != nil {
- conn.Close()
- }
- }()
-
- req := &http.Request{
- Method: http.MethodConnect,
- URL: &url.URL{Host: opts.ConnectAddr},
- Header: map[string][]string{"User-Agent": {grpcUA}},
- }
- if user := opts.User; user != nil {
- u := user.Username()
- p, _ := user.Password()
- req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
- }
- if err := sendHTTPRequest(ctx, req, conn); err != nil {
- return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
- }
-
- r := bufio.NewReader(conn)
- resp, err := http.ReadResponse(r, req)
- if err != nil {
- return nil, fmt.Errorf("reading server HTTP response: %v", err)
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- dump, err := httputil.DumpResponse(resp, true)
- if err != nil {
- return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
- }
- return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
- }
- // The buffer could contain extra bytes from the target server, so we can't
- // discard it. However, in many cases where the server waits for the client
- // to send the first message (e.g. when TLS is being used), the buffer will
- // be empty, so we can avoid the overhead of reading through this buffer.
- if r.Buffered() != 0 {
- return &bufConn{Conn: conn, r: r}, nil
- }
- return conn, nil
-}
-
-// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake.
-func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) {
- conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr)
- if err != nil {
- return nil, err
- }
- return doHTTPConnectHandshake(ctx, conn, grpcUA, opts)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
- req = req.WithContext(ctx)
- if err := req.Write(conn); err != nil {
- return fmt.Errorf("failed to write the HTTP request: %v", err)
- }
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
deleted file mode 100644
index a22a90151..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "context"
- "errors"
- "strings"
- "sync"
- "sync/atomic"
-
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// ServerStream implements streaming functionality for a gRPC server.
-type ServerStream struct {
- *Stream // Embed for common stream functionality.
-
- st internalServerTransport
- ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
- cancel context.CancelFunc // invoked at the end of stream to cancel ctx.
-
- // Holds compressor names passed in grpc-accept-encoding metadata from the
- // client.
- clientAdvertisedCompressors string
- headerWireLength int
-
- // hdrMu protects outgoing header and trailer metadata.
- hdrMu sync.Mutex
- header metadata.MD // the outgoing header metadata. Updated by WriteHeader.
- headerSent atomic.Bool // atomically set when the headers are sent out.
-}
-
-// Read reads an n byte message from the input stream.
-func (s *ServerStream) Read(n int) (mem.BufferSlice, error) {
- b, err := s.Stream.read(n)
- if err == nil {
- s.st.incrMsgRecv()
- }
- return b, err
-}
-
-// SendHeader sends the header metadata for the given stream.
-func (s *ServerStream) SendHeader(md metadata.MD) error {
- return s.st.writeHeader(s, md)
-}
-
-// Write writes the hdr and data bytes to the output stream.
-func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
- return s.st.write(s, hdr, data, opts)
-}
-
-// WriteStatus sends the status of a stream to the client. WriteStatus is
-// the final call made on a stream and always occurs.
-func (s *ServerStream) WriteStatus(st *status.Status) error {
- return s.st.writeStatus(s, st)
-}
-
-// isHeaderSent indicates whether headers have been sent.
-func (s *ServerStream) isHeaderSent() bool {
- return s.headerSent.Load()
-}
-
-// updateHeaderSent updates headerSent and returns true
-// if it was already set.
-func (s *ServerStream) updateHeaderSent() bool {
- return s.headerSent.Swap(true)
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is empty string if there is no compression applied.
-func (s *ServerStream) RecvCompress() string {
- return s.recvCompress
-}
-
-// SendCompress returns the send compressor name.
-func (s *ServerStream) SendCompress() string {
- return s.sendCompress
-}
-
-// ContentSubtype returns the content-subtype for a request. For example, a
-// content-subtype of "proto" will result in a content-type of
-// "application/grpc+proto". This will always be lowercase. See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-func (s *ServerStream) ContentSubtype() string {
- return s.contentSubtype
-}
-
-// SetSendCompress sets the compression algorithm to the stream.
-func (s *ServerStream) SetSendCompress(name string) error {
- if s.isHeaderSent() || s.getState() == streamDone {
- return errors.New("transport: set send compressor called after headers sent or stream done")
- }
-
- s.sendCompress = name
- return nil
-}
-
-// SetContext sets the context of the stream. This will be deleted once the
-// stats handler callouts all move to the gRPC layer.
-func (s *ServerStream) SetContext(ctx context.Context) {
- s.ctx = ctx
-}
-
-// ClientAdvertisedCompressors returns the compressor names advertised by the
-// client via grpc-accept-encoding header.
-func (s *ServerStream) ClientAdvertisedCompressors() []string {
- values := strings.Split(s.clientAdvertisedCompressors, ",")
- for i, v := range values {
- values[i] = strings.TrimSpace(v)
- }
- return values
-}
-
-// Header returns the header metadata of the stream. It returns the out header
-// after t.WriteHeader is called. It does not block and must not be called
-// until after WriteHeader.
-func (s *ServerStream) Header() (metadata.MD, error) {
- // Return the header in stream. It will be the out
- // header after t.WriteHeader is called.
- return s.header.Copy(), nil
-}
-
-// HeaderWireLength returns the size of the headers of the stream as received
-// from the wire.
-func (s *ServerStream) HeaderWireLength() int {
- return s.headerWireLength
-}
-
-// SetHeader sets the header metadata. This can be called multiple times.
-// This should not be called in parallel to other data writes.
-func (s *ServerStream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.isHeaderSent() || s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.header = metadata.Join(s.header, md)
- s.hdrMu.Unlock()
- return nil
-}
-
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server. This can be called multiple times.
-// This should not be called in parallel to other data writes.
-func (s *ServerStream) SetTrailer(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.trailer = metadata.Join(s.trailer, md)
- s.hdrMu.Unlock()
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
deleted file mode 100644
index af4a4aeab..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package transport defines and implements a message-oriented communication
-// channel to complete various transactions (e.g., an RPC). It is meant for
-// grpc-internal usage and is not intended to be imported directly by users.
-package transport
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
- "google.golang.org/grpc/tap"
-)
-
-const logLevel = 2
-
-// recvMsg represents the received msg from the transport. All transport
-// protocol specific info has been removed.
-type recvMsg struct {
- buffer mem.Buffer
- // nil: received some data
- // io.EOF: stream is completed. data is nil.
- // other non-nil error: transport failure. data is nil.
- err error
-}
-
-// recvBuffer is an unbounded channel of recvMsg structs.
-//
-// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
-// holds a channel of recvMsg structs instead of objects implementing the
-// "item" interface. recvBuffer is written to much more often, and using
-// concrete recvMsg structs helps avoid allocation in recvBuffer.put.
-type recvBuffer struct {
- c chan recvMsg
- mu sync.Mutex
- backlog []recvMsg
- err error
-}
-
-func newRecvBuffer() *recvBuffer {
- b := &recvBuffer{
- c: make(chan recvMsg, 1),
- }
- return b
-}
-
-func (b *recvBuffer) put(r recvMsg) {
- b.mu.Lock()
- if b.err != nil {
- // drop the buffer on the floor. Since b.err is not nil, any subsequent reads
- // will always return an error, making this buffer inaccessible.
- r.buffer.Free()
- b.mu.Unlock()
-		// An error occurred earlier; don't accept more data or errors.
- return
- }
- b.err = r.err
- if len(b.backlog) == 0 {
- select {
- case b.c <- r:
- b.mu.Unlock()
- return
- default:
- }
- }
- b.backlog = append(b.backlog, r)
- b.mu.Unlock()
-}
-
-func (b *recvBuffer) load() {
- b.mu.Lock()
- if len(b.backlog) > 0 {
- select {
- case b.c <- b.backlog[0]:
- b.backlog[0] = recvMsg{}
- b.backlog = b.backlog[1:]
- default:
- }
- }
- b.mu.Unlock()
-}
-
-// get returns the channel that receives a recvMsg in the buffer.
-//
-// Upon receipt of a recvMsg, the caller should call load to send another
-// recvMsg onto the channel if there is any.
-func (b *recvBuffer) get() <-chan recvMsg {
- return b.c
-}
-
-// recvBufferReader implements io.Reader interface to read the data from
-// recvBuffer.
-type recvBufferReader struct {
- closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
- ctx context.Context
- ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
- recv *recvBuffer
- last mem.Buffer // Stores the remaining data in the previous calls.
- err error
-}
-
-func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
- if r.err != nil {
- return 0, r.err
- }
- if r.last != nil {
- n, r.last = mem.ReadUnsafe(header, r.last)
- return n, nil
- }
- if r.closeStream != nil {
- n, r.err = r.readMessageHeaderClient(header)
- } else {
- n, r.err = r.readMessageHeader(header)
- }
- return n, r.err
-}
-
-// Read reads the next n bytes from last. If last is drained, it tries to read
-// additional data from recv. It blocks if there is no additional data available in
-// recv. If Read returns any non-nil error, it will continue to return that
-// error.
-func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
- if r.err != nil {
- return nil, r.err
- }
- if r.last != nil {
- buf = r.last
- if r.last.Len() > n {
- buf, r.last = mem.SplitUnsafe(buf, n)
- } else {
- r.last = nil
- }
- return buf, nil
- }
- if r.closeStream != nil {
- buf, r.err = r.readClient(n)
- } else {
- buf, r.err = r.read(n)
- }
- return buf, r.err
-}
-
-func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) {
- select {
- case <-r.ctxDone:
- return 0, ContextErr(r.ctx.Err())
- case m := <-r.recv.get():
- return r.readMessageHeaderAdditional(m, header)
- }
-}
-
-func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
- select {
- case <-r.ctxDone:
- return nil, ContextErr(r.ctx.Err())
- case m := <-r.recv.get():
- return r.readAdditional(m, n)
- }
-}
-
-func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) {
-	// If the context is canceled, close the stream with nil metadata.
- // closeStream writes its error parameter to r.recv as a recvMsg.
- // r.readAdditional acts on that message and returns the necessary error.
- select {
- case <-r.ctxDone:
- // Note that this adds the ctx error to the end of recv buffer, and
- // reads from the head. This will delay the error until recv buffer is
- // empty, thus will delay ctx cancellation in Recv().
- //
- // It's done this way to fix a race between ctx cancel and trailer. The
- // race was, stream.Recv() may return ctx error if ctxDone wins the
- // race, but stream.Trailer() may return a non-nil md because the stream
- // was not marked as done when trailer is received. This closeStream
- // call will mark stream as done, thus fix the race.
- //
-		// TODO: delaying the ctx error seems like an unnecessary side effect. What
- // we really want is to mark the stream as done, and return ctx error
- // faster.
- r.closeStream(ContextErr(r.ctx.Err()))
- m := <-r.recv.get()
- return r.readMessageHeaderAdditional(m, header)
- case m := <-r.recv.get():
- return r.readMessageHeaderAdditional(m, header)
- }
-}
-
-func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
-	// If the context is canceled, close the stream with nil metadata.
- // closeStream writes its error parameter to r.recv as a recvMsg.
- // r.readAdditional acts on that message and returns the necessary error.
- select {
- case <-r.ctxDone:
- // Note that this adds the ctx error to the end of recv buffer, and
- // reads from the head. This will delay the error until recv buffer is
- // empty, thus will delay ctx cancellation in Recv().
- //
- // It's done this way to fix a race between ctx cancel and trailer. The
- // race was, stream.Recv() may return ctx error if ctxDone wins the
- // race, but stream.Trailer() may return a non-nil md because the stream
- // was not marked as done when trailer is received. This closeStream
- // call will mark stream as done, thus fix the race.
- //
-		// TODO: delaying the ctx error seems like an unnecessary side effect. What
- // we really want is to mark the stream as done, and return ctx error
- // faster.
- r.closeStream(ContextErr(r.ctx.Err()))
- m := <-r.recv.get()
- return r.readAdditional(m, n)
- case m := <-r.recv.get():
- return r.readAdditional(m, n)
- }
-}
-
-func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
- r.recv.load()
- if m.err != nil {
- if m.buffer != nil {
- m.buffer.Free()
- }
- return 0, m.err
- }
-
- n, r.last = mem.ReadUnsafe(header, m.buffer)
-
- return n, nil
-}
-
-func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
- r.recv.load()
- if m.err != nil {
- if m.buffer != nil {
- m.buffer.Free()
- }
- return nil, m.err
- }
-
- if m.buffer.Len() > n {
- m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
- }
-
- return m.buffer, nil
-}
-
-type streamState uint32
-
-const (
- streamActive streamState = iota
- streamWriteDone // EndStream sent
- streamReadDone // EndStream received
- streamDone // the entire stream is finished.
-)
-
-// Stream represents an RPC in the transport layer.
-type Stream struct {
- id uint32
- ctx context.Context // the associated context of the stream
- method string // the associated RPC method of the stream
- recvCompress string
- sendCompress string
- buf *recvBuffer
- trReader *transportReader
- fc *inFlow
- wq *writeQuota
-
-	// Callback to state the application's intention to read data. This
- // is used to adjust flow control, if needed.
- requestRead func(int)
-
- state streamState
-
- // contentSubtype is the content-subtype for requests.
-	// This must be lowercase or the behavior is undefined.
- contentSubtype string
-
- trailer metadata.MD // the key-value map of trailer metadata.
-}
-
-func (s *Stream) swapState(st streamState) streamState {
- return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
-}
-
-func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
- return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
-}
-
-func (s *Stream) getState() streamState {
- return streamState(atomic.LoadUint32((*uint32)(&s.state)))
-}
-
-// Trailer returns the cached trailer metadata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD.
-// It can be safely read only after the stream has ended, that is, after
-// either read or write has returned io.EOF.
-func (s *Stream) Trailer() metadata.MD {
- return s.trailer.Copy()
-}
-
-// Context returns the context of the stream.
-func (s *Stream) Context() context.Context {
- return s.ctx
-}
-
-// Method returns the method for the stream.
-func (s *Stream) Method() string {
- return s.method
-}
-
-func (s *Stream) write(m recvMsg) {
- s.buf.put(m)
-}
-
-// ReadMessageHeader reads data into the provided header slice from the stream.
-// It first checks if there was an error during a previous read operation and
-// returns it if present. It then requests a read operation for the length of
-// the header. It continues to read from the stream until the entire header
-// slice is filled or an error occurs. If an `io.EOF` error is encountered with
-// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an
-// unexpected end of the stream. The method returns any error encountered during
-// the read process or nil if the header was successfully read.
-func (s *Stream) ReadMessageHeader(header []byte) (err error) {
- // Don't request a read if there was an error earlier
- if er := s.trReader.er; er != nil {
- return er
- }
- s.requestRead(len(header))
- for len(header) != 0 {
- n, err := s.trReader.ReadMessageHeader(header)
- header = header[n:]
- if len(header) == 0 {
- err = nil
- }
- if err != nil {
- if n > 0 && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return err
- }
- }
- return nil
-}
-
-// read reads n bytes from the wire for this stream.
-func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
- // Don't request a read if there was an error earlier
- if er := s.trReader.er; er != nil {
- return nil, er
- }
- s.requestRead(n)
- for n != 0 {
- buf, err := s.trReader.Read(n)
- var bufLen int
- if buf != nil {
- bufLen = buf.Len()
- }
- n -= bufLen
- if n == 0 {
- err = nil
- }
- if err != nil {
- if bufLen > 0 && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- data.Free()
- return nil, err
- }
- data = append(data, buf)
- }
- return data, nil
-}
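
Both read loops above follow the io.ReadFull convention: a read that ends in io.EOF after partial progress is surfaced as io.ErrUnexpectedEOF. The same pattern in isolation (a minimal sketch, assuming only the standard library's io package is imported):

// readFull fills buf completely, converting a premature io.EOF into
// io.ErrUnexpectedEOF, mirroring Stream.ReadMessageHeader above.
func readFull(r io.Reader, buf []byte) error {
	for len(buf) != 0 {
		n, err := r.Read(buf)
		buf = buf[n:]
		if len(buf) == 0 {
			return nil // filled; ignore a trailing error such as io.EOF
		}
		if err != nil {
			if n > 0 && err == io.EOF {
				return io.ErrUnexpectedEOF // stream ended mid-message
			}
			return err
		}
	}
	return nil
}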
-
-// transportReader reads all the data available for this Stream from the transport and
-// passes it into the decoder, which converts it into a gRPC message stream.
-// The error is io.EOF when the stream is done or another non-nil error if
-// the stream broke.
-type transportReader struct {
- reader *recvBufferReader
- // The handler to control the window update procedure for both this
- // particular stream and the associated transport.
- windowHandler func(int)
- er error
-}
-
-func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
- n, err := t.reader.ReadMessageHeader(header)
- if err != nil {
- t.er = err
- return 0, err
- }
- t.windowHandler(n)
- return n, nil
-}
-
-func (t *transportReader) Read(n int) (mem.Buffer, error) {
- buf, err := t.reader.Read(n)
- if err != nil {
- t.er = err
- return buf, err
- }
- t.windowHandler(buf.Len())
- return buf, nil
-}
-
-// GoString is implemented by Stream so context.String() won't
-// race when printing %#v.
-func (s *Stream) GoString() string {
- return fmt.Sprintf("", s, s.method)
-}
-
-// state of transport
-type transportState int
-
-const (
- reachable transportState = iota
- closing
- draining
-)
-
-// ServerConfig consists of all the configurations to establish a server transport.
-type ServerConfig struct {
- MaxStreams uint32
- ConnectionTimeout time.Duration
- Credentials credentials.TransportCredentials
- InTapHandle tap.ServerInHandle
- StatsHandlers []stats.Handler
- KeepaliveParams keepalive.ServerParameters
- KeepalivePolicy keepalive.EnforcementPolicy
- InitialWindowSize int32
- InitialConnWindowSize int32
- WriteBufferSize int
- ReadBufferSize int
- SharedWriteBuffer bool
- ChannelzParent *channelz.Server
- MaxHeaderListSize *uint32
- HeaderTableSize *uint32
- BufferPool mem.BufferPool
-}
-
-// ConnectOptions covers all relevant options for communicating with the server.
-type ConnectOptions struct {
- // UserAgent is the application user agent.
- UserAgent string
- // Dialer specifies how to dial a network address.
- Dialer func(context.Context, string) (net.Conn, error)
- // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
- FailOnNonTempDialError bool
- // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
- PerRPCCredentials []credentials.PerRPCCredentials
- // TransportCredentials stores the Authenticator required to setup a client
- // connection. Only one of TransportCredentials and CredsBundle is non-nil.
- TransportCredentials credentials.TransportCredentials
- // CredsBundle is the credentials bundle to be used. Only one of
- // TransportCredentials and CredsBundle is non-nil.
- CredsBundle credentials.Bundle
- // KeepaliveParams stores the keepalive parameters.
- KeepaliveParams keepalive.ClientParameters
- // StatsHandlers stores the handler for stats.
- StatsHandlers []stats.Handler
- // InitialWindowSize sets the initial window size for a stream.
- InitialWindowSize int32
- // InitialConnWindowSize sets the initial window size for a connection.
- InitialConnWindowSize int32
- // WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
- WriteBufferSize int
- // ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
- ReadBufferSize int
- // SharedWriteBuffer indicates whether connections should reuse the write buffer.
- SharedWriteBuffer bool
- // ChannelzParent sets the addrConn id which initiated the creation of this client transport.
- ChannelzParent *channelz.SubChannel
- // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
- MaxHeaderListSize *uint32
- // The mem.BufferPool to use when reading/writing to the wire.
- BufferPool mem.BufferPool
-}
-
-// WriteOptions provides additional hints and information for message
-// transmission.
-type WriteOptions struct {
- // Last indicates whether this write is the last piece for
- // this stream.
- Last bool
-}
-
-// CallHdr carries the information of a particular RPC.
-type CallHdr struct {
- // Host specifies the peer's host.
- Host string
-
- // Method specifies the operation to perform.
- Method string
-
- // SendCompress specifies the compression algorithm applied on
- // outbound message.
- SendCompress string
-
- // Creds specifies credentials.PerRPCCredentials for a call.
- Creds credentials.PerRPCCredentials
-
- // ContentSubtype specifies the content-subtype for a request. For example, a
- // content-subtype of "proto" will result in a content-type of
- // "application/grpc+proto". The value of ContentSubtype must be all
- // lowercase, otherwise the behavior is undefined. See
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
- // for more details.
- ContentSubtype string
-
- PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
-
- DoneFunc func() // called when the stream is finished
-}
-
-// ClientTransport is the common interface for all gRPC client-side transport
-// implementations.
-type ClientTransport interface {
- // Close tears down this transport. Once it returns, the transport
- // should not be accessed any more. The caller must make sure this
- // is called only once.
- Close(err error)
-
- // GracefulClose starts to tear down the transport: the transport will stop
- // accepting new RPCs and NewStream will return an error. Once all streams are
- // finished, the transport will close.
- //
- // It does not block.
- GracefulClose()
-
- // NewStream creates a Stream for an RPC.
- NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error)
-
- // Error returns a channel that is closed when some I/O error
- // happens. Typically the caller should have a goroutine to monitor
- // this in order to take action (e.g., close the current transport
- // and create a new one) in the error case. It should not return nil
- // once the transport is initiated.
- Error() <-chan struct{}
-
- // GoAway returns a channel that is closed when ClientTransport
- // receives the draining signal from the server (e.g., GOAWAY frame in
- // HTTP/2).
- GoAway() <-chan struct{}
-
- // GetGoAwayReason returns the reason why GoAway frame was received, along
- // with a human readable string with debug info.
- GetGoAwayReason() (GoAwayReason, string)
-
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
-}
-
-// ServerTransport is the common interface for all gRPC server-side transport
-// implementations.
-//
-// Methods may be called concurrently from multiple goroutines, but
-// Write methods for a given Stream will be called serially.
-type ServerTransport interface {
- // HandleStreams receives incoming streams using the given handler.
- HandleStreams(context.Context, func(*ServerStream))
-
- // Close tears down the transport. Once it is called, the transport
- // should not be accessed any more. All the pending streams and their
- // handlers will be terminated asynchronously.
- Close(err error)
-
- // Peer returns the peer of the server transport.
- Peer() *peer.Peer
-
- // Drain notifies the client this ServerTransport stops accepting new RPCs.
- Drain(debugData string)
-}
-
-type internalServerTransport interface {
- ServerTransport
- writeHeader(s *ServerStream, md metadata.MD) error
- write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
- writeStatus(s *ServerStream, st *status.Status) error
- incrMsgRecv()
-}
-
-// connectionErrorf creates a ConnectionError with the specified error description.
-func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
- return ConnectionError{
- Desc: fmt.Sprintf(format, a...),
- temp: temp,
- err: e,
- }
-}
-
-// ConnectionError is an error that results in the termination of the
-// entire connection and the retry of all the active streams.
-type ConnectionError struct {
- Desc string
- temp bool
- err error
-}
-
-func (e ConnectionError) Error() string {
- return fmt.Sprintf("connection error: desc = %q", e.Desc)
-}
-
-// Temporary indicates if this connection error is temporary or fatal.
-func (e ConnectionError) Temporary() bool {
- return e.temp
-}
-
-// Origin returns the original error of this connection error.
-func (e ConnectionError) Origin() error {
- // Never return nil error here.
- // If the original error is nil, return itself.
- if e.err == nil {
- return e
- }
- return e.err
-}
-
-// Unwrap returns the original error of this connection error or nil when the
-// origin is nil.
-func (e ConnectionError) Unwrap() error {
- return e.err
-}
-
-var (
- // ErrConnClosing indicates that the transport is closing.
- ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
- // errStreamDrain indicates that the stream is rejected because the
- // connection is draining. This could be caused by goaway or balancer
- // removing the address.
- errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indicate an
- // error to the application layer.
- errStreamDone = errors.New("the stream is done")
- // statusGoAway indicates that the server sent a GOAWAY that included this
- // stream's ID in unprocessed RPCs.
- statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
-)
-
-// GoAwayReason contains the reason for the GoAway frame received.
-type GoAwayReason uint8
-
-const (
- // GoAwayInvalid indicates that no GoAway frame is received.
- GoAwayInvalid GoAwayReason = 0
- // GoAwayNoReason is the default value when GoAway frame is received.
- GoAwayNoReason GoAwayReason = 1
- // GoAwayTooManyPings indicates that a GoAway frame with
- // ErrCodeEnhanceYourCalm was received and that the debug data said
- // "too_many_pings".
- GoAwayTooManyPings GoAwayReason = 2
-)
-
-// ContextErr converts the error from context package into a status error.
-func ContextErr(err error) error {
- switch err {
- case context.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return status.Error(codes.Canceled, err.Error())
- }
- return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
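
As a usage illustration (a hypothetical helper in this same transport package, not part of the deleted file), ContextErr is how a blocked receive surfaces context cancellation as a gRPC status error:

// waitRecv blocks for a message or converts ctx's error into a status
// error (codes.DeadlineExceeded or codes.Canceled, as appropriate).
func waitRecv(ctx context.Context, recvCh <-chan recvMsg) (recvMsg, error) {
	select {
	case <-ctx.Done():
		return recvMsg{}, ContextErr(ctx.Err())
	case m := <-recvCh:
		return m, nil
	}
}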
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
deleted file mode 100644
index eb42b19fb..000000000
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package keepalive defines configurable parameters for point-to-point
-// healthcheck.
-package keepalive
-
-import (
- "time"
-)
-
-// ClientParameters is used to set keepalive parameters on the client-side.
-// These configure how the client will actively probe to notice when a
-// connection is broken and send pings so intermediaries will be aware of the
-// liveness of the connection. Make sure these parameters are set in
-// coordination with the keepalive policy on the server, as incompatible
-// settings can result in the connection being closed.
-type ClientParameters struct {
- // After a duration of this time if the client doesn't see any activity it
- // pings the server to see if the transport is still alive.
- // If set below 10s, a minimum value of 10s will be used instead.
- //
- // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
- // minutes (which means the client shouldn't ping more frequently than every
- // 5 minutes).
- //
- // Though not ideal, it's not a strong requirement for Time to be less than
- // EnforcementPolicy.MinTime. Time will automatically double if the server
- // disconnects due to its enforcement policy.
- //
- // For more details, see
- // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
- Time time.Duration
- // After having pinged for keepalive check, the client waits for a duration
- // of Timeout and if no activity is seen even after that the connection is
- // closed.
- //
- // If keepalive is enabled, and this value is not explicitly set, the default
- // is 20 seconds.
- Timeout time.Duration
- // If true, client sends keepalive pings even with no active RPCs. If false,
- // when there are no active RPCs, Time and Timeout will be ignored and no
- // keepalive pings will be sent.
- PermitWithoutStream bool
-}
-
-// ServerParameters is used to set keepalive and max-age parameters on the
-// server-side.
-type ServerParameters struct {
- // MaxConnectionIdle is a duration for the amount of time after which an
- // idle connection would be closed by sending a GoAway. Idleness is measured
- // from the most recent time the number of outstanding RPCs became zero, or
- // from connection establishment.
- MaxConnectionIdle time.Duration // The current default value is infinity.
- // MaxConnectionAge is a duration for the maximum amount of time a
- // connection may exist before it will be closed by sending a GoAway. A
- // random jitter of +/-10% will be added to MaxConnectionAge to spread out
- // connection storms.
- MaxConnectionAge time.Duration // The current default value is infinity.
- // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
- // which the connection will be forcibly closed.
- MaxConnectionAgeGrace time.Duration // The current default value is infinity.
- // After a duration of this time if the server doesn't see any activity it
- // pings the client to see if the transport is still alive.
- // If set below 1s, a minimum value of 1s will be used instead.
- Time time.Duration // The current default value is 2 hours.
- // After having pinged for keepalive check, the server waits for a duration
- // of Timeout and if no activity is seen even after that the connection is
- // closed.
- Timeout time.Duration // The current default value is 20 seconds.
-}
-
-// EnforcementPolicy is used to set keepalive enforcement policy on the
-// server-side. Server will close connection with a client that violates this
-// policy.
-type EnforcementPolicy struct {
- // MinTime is the minimum amount of time a client should wait before sending
- // a keepalive ping.
- MinTime time.Duration // The current default value is 5 minutes.
- // If true, server allows keepalive pings even when there are no active
- // streams (RPCs). If false, and the client sends pings when there are no
- // active streams, the server will send GOAWAY and close the connection.
- PermitWithoutStream bool // false by default.
-}
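
For reference, these parameters are wired in through dial and server options. A minimal sketch (address and durations are placeholders):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Client: ping after 30s of inactivity, give the server 10s to answer.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}),
	)
	if err == nil {
		defer conn.Close()
	}

	// Server: enforce a minimum ping interval and recycle old connections.
	_ = grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionAge:      30 * time.Minute,
			MaxConnectionAgeGrace: 1 * time.Minute,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             1 * time.Minute,
			PermitWithoutStream: true,
		}),
	)
}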
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
deleted file mode 100644
index c37c58c02..000000000
--- a/vendor/google.golang.org/grpc/mem/buffer_pool.go
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package mem
-
-import (
- "sort"
- "sync"
-
- "google.golang.org/grpc/internal"
-)
-
-// BufferPool is a pool of buffers that can be shared and reused, resulting in
-// decreased memory allocation.
-type BufferPool interface {
- // Get returns a buffer with specified length from the pool.
- Get(length int) *[]byte
-
- // Put returns a buffer to the pool.
- Put(*[]byte)
-}
-
-var defaultBufferPoolSizes = []int{
- 256,
- 4 << 10, // 4KB (go page size)
- 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
- 32 << 10, // 32KB (default buffer size for io.Copy)
- 1 << 20, // 1MB
-}
-
-var defaultBufferPool BufferPool
-
-func init() {
- defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
-
- internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
- defaultBufferPool = pool
- }
-
- internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
- bufferPoolingThreshold = threshold
- }
-}
-
-// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
-// created with NewTieredBufferPool that uses a set of default sizes optimized
-// for expected workflows.
-func DefaultBufferPool() BufferPool {
- return defaultBufferPool
-}
-
-// NewTieredBufferPool returns a BufferPool implementation that uses multiple
-// underlying pools of the given pool sizes.
-func NewTieredBufferPool(poolSizes ...int) BufferPool {
- sort.Ints(poolSizes)
- pools := make([]*sizedBufferPool, len(poolSizes))
- for i, s := range poolSizes {
- pools[i] = newSizedBufferPool(s)
- }
- return &tieredBufferPool{
- sizedPools: pools,
- }
-}
-
-// tieredBufferPool implements the BufferPool interface with multiple tiers of
-// buffer pools for different sizes of buffers.
-type tieredBufferPool struct {
- sizedPools []*sizedBufferPool
- fallbackPool simpleBufferPool
-}
-
-func (p *tieredBufferPool) Get(size int) *[]byte {
- return p.getPool(size).Get(size)
-}
-
-func (p *tieredBufferPool) Put(buf *[]byte) {
- p.getPool(cap(*buf)).Put(buf)
-}
-
-func (p *tieredBufferPool) getPool(size int) BufferPool {
- poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
- return p.sizedPools[i].defaultSize >= size
- })
-
- if poolIdx == len(p.sizedPools) {
- return &p.fallbackPool
- }
-
- return p.sizedPools[poolIdx]
-}
-
-// sizedBufferPool is a BufferPool implementation that is optimized for specific
-// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
-// of 16KB, and a sizedBufferPool can be configured to only return buffers with a
-// capacity of 16KB. Note, however, that it does not support returning larger
-// buffers and in fact panics if such a buffer is requested. Because of this,
-// this BufferPool implementation is not meant to be used on its own; rather, it
-// is intended to be embedded in a tieredBufferPool such that Get is only
-// invoked when the required size is smaller than or equal to defaultSize.
-type sizedBufferPool struct {
- pool sync.Pool
- defaultSize int
-}
-
-func (p *sizedBufferPool) Get(size int) *[]byte {
- buf := p.pool.Get().(*[]byte)
- b := *buf
- clear(b[:cap(b)])
- *buf = b[:size]
- return buf
-}
-
-func (p *sizedBufferPool) Put(buf *[]byte) {
- if cap(*buf) < p.defaultSize {
- // Ignore buffers that are too small to fit in the pool. Otherwise, when
- // Get is called it will panic as it tries to index outside the bounds
- // of the buffer.
- return
- }
- p.pool.Put(buf)
-}
-
-func newSizedBufferPool(size int) *sizedBufferPool {
- return &sizedBufferPool{
- pool: sync.Pool{
- New: func() any {
- buf := make([]byte, size)
- return &buf
- },
- },
- defaultSize: size,
- }
-}
-
-var _ BufferPool = (*simpleBufferPool)(nil)
-
-// simpleBufferPool is an implementation of the BufferPool interface that
-// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
-// acquire a buffer from the pool but if that buffer is too small, it returns it
-// to the pool and creates a new one.
-type simpleBufferPool struct {
- pool sync.Pool
-}
-
-func (p *simpleBufferPool) Get(size int) *[]byte {
- bs, ok := p.pool.Get().(*[]byte)
- if ok && cap(*bs) >= size {
- *bs = (*bs)[:size]
- return bs
- }
-
- // A buffer was pulled from the pool, but it is too small. Put it back in
- // the pool and create one large enough.
- if ok {
- p.pool.Put(bs)
- }
-
- b := make([]byte, size)
- return &b
-}
-
-func (p *simpleBufferPool) Put(buf *[]byte) {
- p.pool.Put(buf)
-}
-
-var _ BufferPool = NopBufferPool{}
-
-// NopBufferPool is a buffer pool that returns new buffers without pooling.
-type NopBufferPool struct{}
-
-// Get returns a buffer with specified length from the pool.
-func (NopBufferPool) Get(length int) *[]byte {
- b := make([]byte, length)
- return &b
-}
-
-// Put returns a buffer to the pool.
-func (NopBufferPool) Put(*[]byte) {
-}
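
A minimal usage sketch of the tiered pool (tier sizes are illustrative):

package main

import "google.golang.org/grpc/mem"

func main() {
	pool := mem.NewTieredBufferPool(4<<10, 32<<10)

	// A 10KB request is served by the smallest tier whose size is >= 10KB,
	// i.e. the 32KB pool; the returned slice has len 10KB and cap 32KB.
	buf := pool.Get(10 << 10)
	copy(*buf, "hello")

	// Put routes the buffer back to the tier matching cap(*buf).
	pool.Put(buf)
}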
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
deleted file mode 100644
index 65002e2cc..000000000
--- a/vendor/google.golang.org/grpc/mem/buffer_slice.go
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package mem
-
-import (
- "io"
-)
-
-const (
- // 32 KiB is what io.Copy uses.
- readAllBufSize = 32 * 1024
-)
-
-// BufferSlice offers a means to represent data that spans one or more Buffer
-// instances. A BufferSlice is meant to be immutable after creation, and methods
-// like Ref create and return copies of the slice. This is why all methods have
-// value receivers rather than pointer receivers.
-//
-// Note that any of the methods that read the underlying buffers, such as Ref,
-// Len, or CopyTo, will panic if any underlying buffers have already been
-// freed. It is recommended not to interact with any of the underlying
-// buffers directly; rather, such interactions should be mediated through the
-// various methods on this type.
-//
-// By convention, any APIs that return (mem.BufferSlice, error) should reduce
-// the burden on the caller by never returning a mem.BufferSlice that needs to
-// be freed if the error is non-nil, unless explicitly stated.
-type BufferSlice []Buffer
-
-// Len returns the sum of the length of all the Buffers in this slice.
-//
-// # Warning
-//
-// Invoking the built-in len on a BufferSlice will return the number of buffers
-// in the slice, and *not* the value returned by this function.
-func (s BufferSlice) Len() int {
- var length int
- for _, b := range s {
- length += b.Len()
- }
- return length
-}
-
-// Ref invokes Ref on each buffer in the slice.
-func (s BufferSlice) Ref() {
- for _, b := range s {
- b.Ref()
- }
-}
-
-// Free invokes Buffer.Free() on each Buffer in the slice.
-func (s BufferSlice) Free() {
- for _, b := range s {
- b.Free()
- }
-}
-
-// CopyTo copies each of the underlying Buffer's data into the given buffer,
-// returning the number of bytes copied. Has the same semantics as the copy
-// builtin in that it will copy as many bytes as it can, stopping when either dst
-// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
-func (s BufferSlice) CopyTo(dst []byte) int {
- off := 0
- for _, b := range s {
- off += copy(dst[off:], b.ReadOnlyData())
- }
- return off
-}
-
-// Materialize concatenates all the underlying Buffer's data into a single
-// contiguous buffer using CopyTo.
-func (s BufferSlice) Materialize() []byte {
- l := s.Len()
- if l == 0 {
- return nil
- }
- out := make([]byte, l)
- s.CopyTo(out)
- return out
-}
-
-// MaterializeToBuffer functions like Materialize except that it writes the data
-// to a single Buffer pulled from the given BufferPool.
-//
-// As a special case, if the input BufferSlice contains only one Buffer, this
-// function simply increases the refcount before returning said Buffer. Freeing this
-// buffer won't release it until the BufferSlice is itself released.
-func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
- if len(s) == 1 {
- s[0].Ref()
- return s[0]
- }
- sLen := s.Len()
- if sLen == 0 {
- return emptyBuffer{}
- }
- buf := pool.Get(sLen)
- s.CopyTo(*buf)
- return NewBuffer(buf, pool)
-}
-
-// Reader returns a new Reader for the input slice after taking references to
-// each underlying buffer.
-func (s BufferSlice) Reader() Reader {
- s.Ref()
- return &sliceReader{
- data: s,
- len: s.Len(),
- }
-}
-
-// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
-// with other systems. It also provides an additional convenience method
-// Remaining(), which returns the number of unread bytes remaining in the slice.
-// Buffers will be freed as they are read.
-type Reader interface {
- io.Reader
- io.ByteReader
- // Close frees the underlying BufferSlice and never returns an error. Subsequent
- // calls to Read will return (0, io.EOF).
- Close() error
- // Remaining returns the number of unread bytes remaining in the slice.
- Remaining() int
-}
-
-type sliceReader struct {
- data BufferSlice
- len int
- // The index into data[0].ReadOnlyData().
- bufferIdx int
-}
-
-func (r *sliceReader) Remaining() int {
- return r.len
-}
-
-func (r *sliceReader) Close() error {
- r.data.Free()
- r.data = nil
- r.len = 0
- return nil
-}
-
-func (r *sliceReader) freeFirstBufferIfEmpty() bool {
- if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
- return false
- }
-
- r.data[0].Free()
- r.data = r.data[1:]
- r.bufferIdx = 0
- return true
-}
-
-func (r *sliceReader) Read(buf []byte) (n int, _ error) {
- if r.len == 0 {
- return 0, io.EOF
- }
-
- for len(buf) != 0 && r.len != 0 {
- // Copy as much as possible from the first Buffer in the slice into the
- // given byte slice.
- data := r.data[0].ReadOnlyData()
- copied := copy(buf, data[r.bufferIdx:])
- r.len -= copied // Reduce len by the number of bytes copied.
- r.bufferIdx += copied // Increment the buffer index.
- n += copied // Increment the total number of bytes read.
- buf = buf[copied:] // Shrink the given byte slice.
-
- // If we have copied all the data from the first Buffer, free it and advance to
- // the next in the slice.
- r.freeFirstBufferIfEmpty()
- }
-
- return n, nil
-}
-
-func (r *sliceReader) ReadByte() (byte, error) {
- if r.len == 0 {
- return 0, io.EOF
- }
-
- // There may be any number of empty buffers in the slice; free them all until a
- // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
- for r.freeFirstBufferIfEmpty() {
- }
-
- b := r.data[0].ReadOnlyData()[r.bufferIdx]
- r.len--
- r.bufferIdx++
- // Free the first buffer in the slice if the last byte was read
- r.freeFirstBufferIfEmpty()
- return b, nil
-}
-
-var _ io.Writer = (*writer)(nil)
-
-type writer struct {
- buffers *BufferSlice
- pool BufferPool
-}
-
-func (w *writer) Write(p []byte) (n int, err error) {
- b := Copy(p, w.pool)
- *w.buffers = append(*w.buffers, b)
- return b.Len(), nil
-}
-
-// NewWriter wraps the given BufferSlice and BufferPool to implement the
-// io.Writer interface. Every call to Write copies the contents of the given
-// buffer into a new Buffer pulled from the given pool and the Buffer is
-// added to the given BufferSlice.
-func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
- return &writer{buffers: buffers, pool: pool}
-}
-
-// ReadAll reads from r until an error or EOF and returns the data it read.
-// A successful call returns err == nil, not err == EOF. Because ReadAll is
-// defined to read from src until EOF, it does not treat an EOF from Read
-// as an error to be reported.
-//
-// Important: A failed call returns a non-nil error and may also return
-// partially read buffers. It is the responsibility of the caller to free the
-// BufferSlice returned, or its memory will not be reused.
-func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) {
- var result BufferSlice
- if wt, ok := r.(io.WriterTo); ok {
- // This is more optimal since wt knows the size of chunks it wants to
- // write and, hence, we can allocate buffers of an optimal size to fit
- // them. E.g., it might be a single big chunk, which we wouldn't chop
- // into pieces.
- w := NewWriter(&result, pool)
- _, err := wt.WriteTo(w)
- return result, err
- }
-nextBuffer:
- for {
- buf := pool.Get(readAllBufSize)
- // We asked for 32KiB but may have been given a bigger buffer.
- // Use all of it if that's the case.
- *buf = (*buf)[:cap(*buf)]
- usedCap := 0
- for {
- n, err := r.Read((*buf)[usedCap:])
- usedCap += n
- if err != nil {
- if usedCap == 0 {
- // Nothing in this buf, put it back
- pool.Put(buf)
- } else {
- *buf = (*buf)[:usedCap]
- result = append(result, NewBuffer(buf, pool))
- }
- if err == io.EOF {
- err = nil
- }
- return result, err
- }
- if len(*buf) == usedCap {
- result = append(result, NewBuffer(buf, pool))
- continue nextBuffer
- }
- }
- }
-}
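
A hypothetical consumer tying these pieces together (src is any io.Reader; the helper name is a placeholder):

package main

import (
	"io"

	"google.golang.org/grpc/mem"
)

func consume(src io.Reader) error {
	data, err := mem.ReadAll(src, mem.DefaultBufferPool())
	if err != nil {
		data.Free() // a failed ReadAll may still return partially read buffers
		return err
	}
	defer data.Free()

	r := data.Reader() // takes its own references to each buffer
	defer r.Close()    // releases them even if we stop reading early

	payload, err := io.ReadAll(r)
	_ = payload
	return err
}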
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
deleted file mode 100644
index ecbf0b9a7..000000000
--- a/vendor/google.golang.org/grpc/mem/buffers.go
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package mem provides utilities that facilitate memory reuse in byte slices
-// that are used as buffers.
-//
-// # Experimental
-//
-// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
-// removed in a later release.
-package mem
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
-)
-
-// A Buffer represents a reference counted piece of data (in bytes) that can be
-// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
-// released by calling Free(), which invokes the free function given at creation
-// only after all references are released.
-//
-// Note that a Buffer is not safe for concurrent access and instead each
-// goroutine should use its own reference to the data, which can be acquired via
-// a call to Ref().
-//
-// Attempts to access the underlying data after releasing the reference to the
-// Buffer will panic.
-type Buffer interface {
- // ReadOnlyData returns the underlying byte slice. Note that it is undefined
- // behavior to modify the contents of this slice in any way.
- ReadOnlyData() []byte
- // Ref increases the reference counter for this Buffer.
- Ref()
- // Free decrements this Buffer's reference counter and frees the underlying
- // byte slice if the counter reaches 0 as a result of this call.
- Free()
- // Len returns the Buffer's size.
- Len() int
-
- split(n int) (left, right Buffer)
- read(buf []byte) (int, Buffer)
-}
-
-var (
- bufferPoolingThreshold = 1 << 10
-
- bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
- refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
-)
-
-// IsBelowBufferPoolingThreshold returns true if the given size is less than or
-// equal to the threshold for buffer pooling. This is used to determine whether
-// to pool buffers or allocate them directly.
-func IsBelowBufferPoolingThreshold(size int) bool {
- return size <= bufferPoolingThreshold
-}
-
-type buffer struct {
- origData *[]byte
- data []byte
- refs *atomic.Int32
- pool BufferPool
-}
-
-func newBuffer() *buffer {
- return bufferObjectPool.Get().(*buffer)
-}
-
-// NewBuffer creates a new Buffer from the given data, initializing the reference
-// counter to 1. The data will then be returned to the given pool when all
-// references to the returned Buffer are released. As a special case to avoid
-// additional allocations, if the given buffer pool is nil, the returned buffer
-// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
-// underlying data is never freed.
-//
-// Note that the backing array of the given data is not copied.
-func NewBuffer(data *[]byte, pool BufferPool) Buffer {
- // Use the buffer's capacity instead of the length, otherwise buffers may
- // not be reused under certain conditions. For example, if a large buffer
- // is acquired from the pool, but fewer bytes than the buffering threshold
- // are written to it, the buffer will not be returned to the pool.
- if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
- return (SliceBuffer)(*data)
- }
- b := newBuffer()
- b.origData = data
- b.data = *data
- b.pool = pool
- b.refs = refObjectPool.Get().(*atomic.Int32)
- b.refs.Add(1)
- return b
-}
-
-// Copy creates a new Buffer from the given data, initializing the reference
-// counter to 1.
-//
-// It acquires a []byte from the given pool and copies over the backing array
-// of the given data. The []byte acquired from the pool is returned to the
-// pool when all references to the returned Buffer are released.
-func Copy(data []byte, pool BufferPool) Buffer {
- if IsBelowBufferPoolingThreshold(len(data)) {
- buf := make(SliceBuffer, len(data))
- copy(buf, data)
- return buf
- }
-
- buf := pool.Get(len(data))
- copy(*buf, data)
- return NewBuffer(buf, pool)
-}
-
-func (b *buffer) ReadOnlyData() []byte {
- if b.refs == nil {
- panic("Cannot read freed buffer")
- }
- return b.data
-}
-
-func (b *buffer) Ref() {
- if b.refs == nil {
- panic("Cannot ref freed buffer")
- }
- b.refs.Add(1)
-}
-
-func (b *buffer) Free() {
- if b.refs == nil {
- panic("Cannot free freed buffer")
- }
-
- refs := b.refs.Add(-1)
- switch {
- case refs > 0:
- return
- case refs == 0:
- if b.pool != nil {
- b.pool.Put(b.origData)
- }
-
- refObjectPool.Put(b.refs)
- b.origData = nil
- b.data = nil
- b.refs = nil
- b.pool = nil
- bufferObjectPool.Put(b)
- default:
- panic("Cannot free freed buffer")
- }
-}
-
-func (b *buffer) Len() int {
- return len(b.ReadOnlyData())
-}
-
-func (b *buffer) split(n int) (Buffer, Buffer) {
- if b.refs == nil {
- panic("Cannot split freed buffer")
- }
-
- b.refs.Add(1)
- split := newBuffer()
- split.origData = b.origData
- split.data = b.data[n:]
- split.refs = b.refs
- split.pool = b.pool
-
- b.data = b.data[:n]
-
- return b, split
-}
-
-func (b *buffer) read(buf []byte) (int, Buffer) {
- if b.refs == nil {
- panic("Cannot read freed buffer")
- }
-
- n := copy(buf, b.data)
- if n == len(b.data) {
- b.Free()
- return n, nil
- }
-
- b.data = b.data[n:]
- return n, b
-}
-
-func (b *buffer) String() string {
- return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
-}
-
-// ReadUnsafe reads bytes from the given Buffer into the provided slice.
-// It does not perform safety checks.
-func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
- return buf.read(dst)
-}
-
-// SplitUnsafe modifies the given buffer to point to the first n bytes while it
-// returns a new reference to the remaining bytes. The returned Buffer
-// functions just like a normal reference acquired using Ref().
-func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
- return buf.split(n)
-}
-
-type emptyBuffer struct{}
-
-func (e emptyBuffer) ReadOnlyData() []byte {
- return nil
-}
-
-func (e emptyBuffer) Ref() {}
-func (e emptyBuffer) Free() {}
-
-func (e emptyBuffer) Len() int {
- return 0
-}
-
-func (e emptyBuffer) split(int) (left, right Buffer) {
- return e, e
-}
-
-func (e emptyBuffer) read([]byte) (int, Buffer) {
- return 0, e
-}
-
-// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
-// methods for reading, splitting, and managing the byte slice.
-type SliceBuffer []byte
-
-// ReadOnlyData returns the byte slice.
-func (s SliceBuffer) ReadOnlyData() []byte { return s }
-
-// Ref is a noop implementation of Ref.
-func (s SliceBuffer) Ref() {}
-
-// Free is a noop implementation of Free.
-func (s SliceBuffer) Free() {}
-
-// Len returns the length of the underlying byte slice.
-func (s SliceBuffer) Len() int { return len(s) }
-
-func (s SliceBuffer) split(n int) (left, right Buffer) {
- return s[:n], s[n:]
-}
-
-func (s SliceBuffer) read(buf []byte) (int, Buffer) {
- n := copy(buf, s)
- if n == len(s) {
- return n, nil
- }
- return n, s[n:]
-}
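
The reference-counting contract in practice, as a minimal sketch. The 64KB payload is chosen to exceed the pooling threshold so that real refcounting, not the SliceBuffer fast path, is exercised:

package main

import "google.golang.org/grpc/mem"

func main() {
	pool := mem.DefaultBufferPool()
	payload := make([]byte, 64<<10)
	buf := mem.Copy(payload, pool) // refcount starts at 1

	buf.Ref() // hand a second reference to another goroutine
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer buf.Free() // release that goroutine's reference
		_ = buf.ReadOnlyData()
	}()

	<-done
	buf.Free() // last reference: the backing slice returns to the pool
}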
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
deleted file mode 100644
index d2e15253b..000000000
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package metadata defines the structure of the metadata supported by the gRPC library.
-// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
-// for more information about custom-metadata.
-package metadata // import "google.golang.org/grpc/metadata"
-
-import (
- "context"
- "fmt"
- "strings"
-
- "google.golang.org/grpc/internal"
-)
-
-func init() {
- internal.FromOutgoingContextRaw = fromOutgoingContextRaw
-}
-
-// DecodeKeyValue returns k, v, nil.
-//
-// Deprecated: use k and v directly instead.
-func DecodeKeyValue(k, v string) (string, string, error) {
- return k, v, nil
-}
-
-// MD is a mapping from metadata keys to values. Users should use the following
-// two convenience functions New and Pairs to generate MD.
-type MD map[string][]string
-
-// New creates an MD from a given key-value map.
-//
-// Only the following ASCII characters are allowed in keys:
-// - digits: 0-9
-// - uppercase letters: A-Z (normalized to lower)
-// - lowercase letters: a-z
-// - special characters: -_.
-//
-// Uppercase letters are automatically converted to lowercase.
-//
-// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
-// result in errors if set in metadata.
-func New(m map[string]string) MD {
- md := make(MD, len(m))
- for k, val := range m {
- key := strings.ToLower(k)
- md[key] = append(md[key], val)
- }
- return md
-}
-
-// Pairs returns an MD formed by the mapping of key, value ...
-// Pairs panics if len(kv) is odd.
-//
-// Only the following ASCII characters are allowed in keys:
-// - digits: 0-9
-// - uppercase letters: A-Z (normalized to lower)
-// - lowercase letters: a-z
-// - special characters: -_.
-//
-// Uppercase letters are automatically converted to lowercase.
-//
-// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
-// result in errors if set in metadata.
-func Pairs(kv ...string) MD {
- if len(kv)%2 == 1 {
- panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
- }
- md := make(MD, len(kv)/2)
- for i := 0; i < len(kv); i += 2 {
- key := strings.ToLower(kv[i])
- md[key] = append(md[key], kv[i+1])
- }
- return md
-}
-
-// Len returns the number of items in md.
-func (md MD) Len() int {
- return len(md)
-}
-
-// Copy returns a copy of md.
-func (md MD) Copy() MD {
- out := make(MD, len(md))
- for k, v := range md {
- out[k] = copyOf(v)
- }
- return out
-}
-
-// Get obtains the values for a given key.
-//
-// k is converted to lowercase before searching in md.
-func (md MD) Get(k string) []string {
- k = strings.ToLower(k)
- return md[k]
-}
-
-// Set sets the value of a given key with a slice of values.
-//
-// k is converted to lowercase before storing in md.
-func (md MD) Set(k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = vals
-}
-
-// Append adds the values to key k, not overwriting what was already stored at
-// that key.
-//
-// k is converted to lowercase before storing in md.
-func (md MD) Append(k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = append(md[k], vals...)
-}
-
-// Delete removes the values for a given key k which is converted to lowercase
-// before removing it from md.
-func (md MD) Delete(k string) {
- k = strings.ToLower(k)
- delete(md, k)
-}
-
-// Join joins any number of mds into a single MD.
-//
-// The order of values for each key is determined by the order in which the mds
-// containing those values are presented to Join.
-func Join(mds ...MD) MD {
- out := MD{}
- for _, md := range mds {
- for k, v := range md {
- out[k] = append(out[k], v...)
- }
- }
- return out
-}
-
-type mdIncomingKey struct{}
-type mdOutgoingKey struct{}
-
-// NewIncomingContext creates a new context with incoming md attached. md must
-// not be modified after calling this function.
-func NewIncomingContext(ctx context.Context, md MD) context.Context {
- return context.WithValue(ctx, mdIncomingKey{}, md)
-}
-
-// NewOutgoingContext creates a new context with outgoing md attached. If used
-// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata. md must not be modified after
-// calling this function.
-func NewOutgoingContext(ctx context.Context, md MD) context.Context {
- return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
-}
-
-// AppendToOutgoingContext returns a new context with the provided kv merged
-// with any existing metadata in the context. Please refer to the documentation
-// of Pairs for a description of kv.
-func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
- if len(kv)%2 == 1 {
- panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
- }
- md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
- added := make([][]string, len(md.added)+1)
- copy(added, md.added)
- kvCopy := make([]string, 0, len(kv))
- for i := 0; i < len(kv); i += 2 {
- kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
- }
- added[len(added)-1] = kvCopy
- return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
-}
-
-// FromIncomingContext returns the incoming metadata in ctx if it exists.
-//
-// All keys in the returned MD are lowercase.
-func FromIncomingContext(ctx context.Context) (MD, bool) {
- md, ok := ctx.Value(mdIncomingKey{}).(MD)
- if !ok {
- return nil, false
- }
- out := make(MD, len(md))
- for k, v := range md {
- // We need to manually convert all keys to lower case, because MD is a
- // map, and there's no guarantee that the MD attached to the context is
- // created using our helper functions.
- key := strings.ToLower(k)
- out[key] = copyOf(v)
- }
- return out, true
-}
-
-// ValueFromIncomingContext returns the metadata value corresponding to the metadata
-// key from the incoming metadata if it exists. Keys are matched in a
-// case-insensitive manner.
-func ValueFromIncomingContext(ctx context.Context, key string) []string {
- md, ok := ctx.Value(mdIncomingKey{}).(MD)
- if !ok {
- return nil
- }
-
- if v, ok := md[key]; ok {
- return copyOf(v)
- }
- for k, v := range md {
- // Case insensitive comparison: MD is a map, and there's no guarantee
- // that the MD attached to the context is created using our helper
- // functions.
- if strings.EqualFold(k, key) {
- return copyOf(v)
- }
- }
- return nil
-}
-
-func copyOf(v []string) []string {
- vals := make([]string, len(v))
- copy(vals, v)
- return vals
-}
-
-// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
-//
-// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
-// is a map, there's no guarantee it's created using our helper functions) and
-// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
-// lowercase).
-func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
- raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
- if !ok {
- return nil, nil, false
- }
-
- return raw.md, raw.added, true
-}
-
-// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
-//
-// All keys in the returned MD are lowercase.
-func FromOutgoingContext(ctx context.Context) (MD, bool) {
- raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
- if !ok {
- return nil, false
- }
-
- mdSize := len(raw.md)
- for i := range raw.added {
- mdSize += len(raw.added[i]) / 2
- }
-
- out := make(MD, mdSize)
- for k, v := range raw.md {
- // We need to manually convert all keys to lower case, because MD is a
- // map, and there's no guarantee that the MD attached to the context is
- // created using our helper functions.
- key := strings.ToLower(k)
- out[key] = copyOf(v)
- }
- for _, added := range raw.added {
- if len(added)%2 == 1 {
- panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
- }
-
- for i := 0; i < len(added); i += 2 {
- key := strings.ToLower(added[i])
- out[key] = append(out[key], added[i+1])
- }
- }
- return out, ok
-}
-
-type rawMD struct {
- md MD
- added [][]string
-}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
deleted file mode 100644
index 499a49c8c..000000000
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package peer defines various peer information associated with RPCs and
-// corresponding utils.
-package peer
-
-import (
- "context"
- "fmt"
- "net"
- "strings"
-
- "google.golang.org/grpc/credentials"
-)
-
-// Peer contains the information of the peer for an RPC, such as the address
-// and authentication information.
-type Peer struct {
- // Addr is the peer address.
- Addr net.Addr
- // LocalAddr is the local address.
- LocalAddr net.Addr
- // AuthInfo is the authentication information of the transport.
- // It is nil if there is no transport security being used.
- AuthInfo credentials.AuthInfo
-}
-
-// String ensures the Peer type implements the Stringer interface so that a
-// context carrying a peerKey value can be printed effectively.
-func (p *Peer) String() string {
- if p == nil {
- return "Peer"
- }
- sb := &strings.Builder{}
- sb.WriteString("Peer{")
- if p.Addr != nil {
- fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
- } else {
- fmt.Fprintf(sb, "Addr: , ")
- }
- if p.LocalAddr != nil {
- fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
- } else {
- fmt.Fprintf(sb, "LocalAddr: , ")
- }
- if p.AuthInfo != nil {
- fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
- } else {
- fmt.Fprintf(sb, "AuthInfo: ")
- }
- sb.WriteString("}")
-
- return sb.String()
-}
-
-type peerKey struct{}
-
-// NewContext creates a new context with peer information attached.
-func NewContext(ctx context.Context, p *Peer) context.Context {
- return context.WithValue(ctx, peerKey{}, p)
-}
-
-// FromContext returns the peer information in ctx if it exists.
-func FromContext(ctx context.Context) (p *Peer, ok bool) {
- p, ok = ctx.Value(peerKey{}).(*Peer)
- return
-}
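
A hedged usage sketch: inside a gRPC handler, the transport has already attached the peer, so FromContext retrieves it (the helper name is hypothetical):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/peer"
)

// logCaller logs the caller's address and auth type when a peer is present.
func logCaller(ctx context.Context) {
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("request from %v (auth: %v)", p.Addr, p.AuthInfo)
	}
}

func main() {
	logCaller(context.Background()) // no peer attached here; ok is false
}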
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
deleted file mode 100644
index a2d2a798d..000000000
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "fmt"
- "io"
- "sync/atomic"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal/channelz"
- istatus "google.golang.org/grpc/internal/status"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-// pickerGeneration stores a picker and a channel used to signal that a picker
-// newer than this one is available.
-type pickerGeneration struct {
- // picker is the picker produced by the LB policy. May be nil if a picker
- // has never been produced.
- picker balancer.Picker
- // blockingCh is closed when the picker has been invalidated because there
- // is a new one available.
- blockingCh chan struct{}
-}
-
-// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
-// actions and unblocks when there's a picker update.
-type pickerWrapper struct {
- // If pickerGen holds a nil pointer, the pickerWrapper is closed.
- pickerGen atomic.Pointer[pickerGeneration]
- statsHandlers []stats.Handler // to record blocking picker calls
-}
-
-func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
- pw := &pickerWrapper{
- statsHandlers: statsHandlers,
- }
- pw.pickerGen.Store(&pickerGeneration{
- blockingCh: make(chan struct{}),
- })
- return pw
-}
-
-// updatePicker is called by UpdateState calls from the LB policy. It
-// unblocks all blocked picks.
-func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
- old := pw.pickerGen.Swap(&pickerGeneration{
- picker: p,
- blockingCh: make(chan struct{}),
- })
- close(old.blockingCh)
-}
-
-// doneChannelzWrapper performs the following:
-// - increments the calls started channelz counter
-// - wraps the done function in the passed in result to increment the calls
-// failed or calls succeeded channelz counter before invoking the actual
-// done function.
-func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
- ac := acbw.ac
- ac.incrCallsStarted()
- done := result.Done
- result.Done = func(b balancer.DoneInfo) {
- if b.Err != nil && b.Err != io.EOF {
- ac.incrCallsFailed()
- } else {
- ac.incrCallsSucceeded()
- }
- if done != nil {
- done(b)
- }
- }
-}
-
-// pick returns the transport that will be used for the RPC.
-// It may block in the following cases:
-// - there's no picker
-// - the current picker returns ErrNoSubConnAvailable
-// - the current picker returns other errors and failfast is false.
-// - the subConn returned by the current picker is not READY
-// When one of these situations happens, pick blocks until the picker gets updated.
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
- var ch chan struct{}
-
- var lastPickErr error
-
- for {
- pg := pw.pickerGen.Load()
- if pg == nil {
- return nil, balancer.PickResult{}, ErrClientConnClosing
- }
- if pg.picker == nil {
- ch = pg.blockingCh
- }
- if ch == pg.blockingCh {
- // This could happen when either:
- // - pw.picker is nil (the previous if condition), or
- // - we have already called pick on the current picker.
- select {
- case <-ctx.Done():
- var errStr string
- if lastPickErr != nil {
- errStr = "latest balancer error: " + lastPickErr.Error()
- } else {
- errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err())
- }
- switch ctx.Err() {
- case context.DeadlineExceeded:
- return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
- case context.Canceled:
- return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
- }
- case <-ch:
- }
- continue
- }
-
- // If the channel is set, it means that the pick call had to wait for a
- // new picker at some point. Either it's the first iteration and this
- // function received the first picker, or a picker errored with
- // ErrNoSubConnAvailable or errored with failfast set to false, which
- // will trigger a continue to the next iteration. In the first case this
- // conditional will hit if this call had to block (the channel is set).
- // In the second case, the only way it will get to this conditional is
- // if there is a new picker.
- if ch != nil {
- for _, sh := range pw.statsHandlers {
- sh.HandleRPC(ctx, &stats.PickerUpdated{})
- }
- }
-
- ch = pg.blockingCh
- p := pg.picker
-
- pickResult, err := p.Pick(info)
- if err != nil {
- if err == balancer.ErrNoSubConnAvailable {
- continue
- }
- if st, ok := status.FromError(err); ok {
- // Status error: end the RPC unconditionally with this status.
- // First restrict the code to the list allowed by gRFC A54.
- if istatus.IsRestrictedControlPlaneCode(st) {
- err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
- }
- return nil, balancer.PickResult{}, dropError{error: err}
- }
- // For all other errors, wait for ready RPCs should block and other
- // RPCs should fail with unavailable.
- if !failfast {
- lastPickErr = err
- continue
- }
- return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
- }
-
- acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
- if !ok {
- logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
- continue
- }
- if t := acbw.ac.getReadyTransport(); t != nil {
- if channelz.IsOn() {
- doneChannelzWrapper(acbw, &pickResult)
- return t, pickResult, nil
- }
- return t, pickResult, nil
- }
- if pickResult.Done != nil {
- // Calling done with nil error, no bytes sent and no bytes received.
- // DoneInfo with default value works.
- pickResult.Done(balancer.DoneInfo{})
- }
- logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
- // If ok == false, ac.state is not READY.
- // A valid picker always returns READY subConn. This means the state of ac
- // just changed, and picker will be updated shortly.
- // continue back to the beginning of the for loop to repick.
- }
-}
-
-func (pw *pickerWrapper) close() {
- old := pw.pickerGen.Swap(nil)
- close(old.blockingCh)
-}
-
-// reset clears the pickerWrapper and prepares it for being used again when idle
-// mode is exited.
-func (pw *pickerWrapper) reset() {
- old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
- close(old.blockingCh)
-}
-
-// dropError is a wrapper error that indicates the LB policy wishes to drop the
-// RPC and not retry it.
-type dropError struct {
- error
-}
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
deleted file mode 100644
index ee0ff969a..000000000
--- a/vendor/google.golang.org/grpc/preloader.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/status"
-)
-
-// PreparedMsg is responsible for creating a marshalled and compressed message.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type PreparedMsg struct {
- // Fields used to prepare the message before sending it.
- encodedData mem.BufferSlice
- hdr []byte
- payload mem.BufferSlice
- pf payloadFormat
-}
-
-// Encode marshals and compresses the message using the codec and compressor for the stream.
-func (p *PreparedMsg) Encode(s Stream, msg any) error {
- ctx := s.Context()
- rpcInfo, ok := rpcInfoFromContext(ctx)
- if !ok {
- return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
- }
-
- // check if the context has the relevant information to prepareMsg
- if rpcInfo.preloaderInfo == nil {
- return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
- }
- if rpcInfo.preloaderInfo.codec == nil {
- return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
- }
-
- // prepare the msg
- data, err := encode(rpcInfo.preloaderInfo.codec, msg)
- if err != nil {
- return err
- }
-
- materializedData := data.Materialize()
- data.Free()
- p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)}
-
- // TODO: it should be possible to grab the bufferPool from the underlying
- // stream implementation with a type cast to its actual type (such as
- // addrConnStream) and accessing the buffer pool directly.
- var compData mem.BufferSlice
- compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
- if err != nil {
- return err
- }
-
- if p.pf.isCompressed() {
- materializedCompData := compData.Materialize()
- compData.Free()
- compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)}
- }
-
- p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
-
- return nil
-}
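A minimal usage sketch of this (experimental) API, assuming a real client stream whose context carries the RPC's codec and compressor info; `sendPrepared` is a hypothetical helper. Encoding once up front lets the same bytes be reused instead of re-marshaling per send:

```go
package example

import "google.golang.org/grpc"

// sendPrepared encodes req once and sends the prepared bytes on the stream.
func sendPrepared(stream grpc.ClientStream, req any) error {
	pm := &grpc.PreparedMsg{}
	if err := pm.Encode(stream, req); err != nil {
		return err
	}
	// SendMsg detects *PreparedMsg and skips re-marshaling and re-compression.
	return stream.SendMsg(pm)
}
```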
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
deleted file mode 100644
index ef3d6ed6c..000000000
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package dns implements a DNS resolver to be installed as the default resolver
-// in gRPC.
-package dns
-
-import (
- "time"
-
- "google.golang.org/grpc/internal/resolver/dns"
- "google.golang.org/grpc/resolver"
-)
-
-// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
-//
-// This function affects the global timeout used by all channels using the DNS
-// name resolver scheme.
-//
-// It must be called only at application startup, before any gRPC calls are
-// made. Modifying this value after initialization is not thread-safe.
-//
-// The default value is 30 seconds. Setting the timeout too low may result in
-// premature timeouts during resolution, while setting it too high may lead to
-// unnecessary delays in service discovery. Choose a value appropriate for your
-// specific needs and network environment.
-func SetResolvingTimeout(timeout time.Duration) {
- dns.ResolvingTimeout = timeout
-}
-
-// NewBuilder creates a dnsBuilder, which is used to create DNS resolvers.
-//
-// Deprecated: import grpc and use resolver.Get("dns") instead.
-func NewBuilder() resolver.Builder {
- return dns.NewBuilder()
-}
-
-// SetMinResolutionInterval sets the default minimum interval at which DNS
-// re-resolutions are allowed. This helps to prevent excessive re-resolution.
-//
-// It must be called only at application startup, before any gRPC calls are
-// made. Modifying this value after initialization is not thread-safe.
-func SetMinResolutionInterval(d time.Duration) {
- dns.MinResolutionInterval = d
-}
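A minimal startup sketch for the two knobs above; the chosen durations are arbitrary examples, and both settings are process-global:

```go
package example

import (
	"time"

	"google.golang.org/grpc/resolver/dns"
)

func init() {
	// Both knobs must be set before any gRPC calls are made.
	dns.SetResolvingTimeout(10 * time.Second)      // per-lookup deadline
	dns.SetMinResolutionInterval(30 * time.Second) // re-resolution rate limit
}
```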
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
deleted file mode 100644
index 975b49970..000000000
--- a/vendor/google.golang.org/grpc/resolver/map.go
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- *
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package resolver
-
-import (
- "encoding/base64"
- "sort"
- "strings"
-)
-
-type addressMapEntry struct {
- addr Address
- value any
-}
-
-// AddressMap is a map of addresses to arbitrary values taking into account
-// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
-// Multiple accesses may not be performed concurrently. Must be created via
-// NewAddressMap; do not construct directly.
-type AddressMap struct {
- // The underlying map is keyed by an Address with fields that we don't care
- // about being set to their zero values. The only fields that we care about
- // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
- // distinguish between addresses with same `Addr` and `ServerName`, but
- // different `Attributes`, we cannot store the `Attributes` in the map key.
- //
- // The comparison operation for structs works as follows:
- // Struct values are comparable if all their fields are comparable. Two
- // struct values are equal if their corresponding non-blank fields are equal.
- //
- // The value type of the map contains a slice of addresses which match the key
- // in their `Addr` and `ServerName` fields and contain the corresponding value
- // associated with them.
- m map[Address]addressMapEntryList
-}
-
-func toMapKey(addr *Address) Address {
- return Address{Addr: addr.Addr, ServerName: addr.ServerName}
-}
-
-type addressMapEntryList []*addressMapEntry
-
-// NewAddressMap creates a new AddressMap.
-func NewAddressMap() *AddressMap {
- return &AddressMap{m: make(map[Address]addressMapEntryList)}
-}
-
-// find returns the index of addr in the addressMapEntry slice, or -1 if not
-// present.
-func (l addressMapEntryList) find(addr Address) int {
- for i, entry := range l {
- // Attributes are the only thing to match on here, since `Addr` and
- // `ServerName` are already equal.
- if entry.addr.Attributes.Equal(addr.Attributes) {
- return i
- }
- }
- return -1
-}
-
-// Get returns the value for the address in the map, if present.
-func (a *AddressMap) Get(addr Address) (value any, ok bool) {
- addrKey := toMapKey(&addr)
- entryList := a.m[addrKey]
- if entry := entryList.find(addr); entry != -1 {
- return entryList[entry].value, true
- }
- return nil, false
-}
-
-// Set updates or adds the value to the address in the map.
-func (a *AddressMap) Set(addr Address, value any) {
- addrKey := toMapKey(&addr)
- entryList := a.m[addrKey]
- if entry := entryList.find(addr); entry != -1 {
- entryList[entry].value = value
- return
- }
- a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
-}
-
-// Delete removes addr from the map.
-func (a *AddressMap) Delete(addr Address) {
- addrKey := toMapKey(&addr)
- entryList := a.m[addrKey]
- entry := entryList.find(addr)
- if entry == -1 {
- return
- }
- if len(entryList) == 1 {
- entryList = nil
- } else {
- copy(entryList[entry:], entryList[entry+1:])
- entryList = entryList[:len(entryList)-1]
- }
- a.m[addrKey] = entryList
-}
-
-// Len returns the number of entries in the map.
-func (a *AddressMap) Len() int {
- ret := 0
- for _, entryList := range a.m {
- ret += len(entryList)
- }
- return ret
-}
-
-// Keys returns a slice of all current map keys.
-func (a *AddressMap) Keys() []Address {
- ret := make([]Address, 0, a.Len())
- for _, entryList := range a.m {
- for _, entry := range entryList {
- ret = append(ret, entry.addr)
- }
- }
- return ret
-}
-
-// Values returns a slice of all current map values.
-func (a *AddressMap) Values() []any {
- ret := make([]any, 0, a.Len())
- for _, entryList := range a.m {
- for _, entry := range entryList {
- ret = append(ret, entry.value)
- }
- }
- return ret
-}
-
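A short sketch of the semantics described above: two addresses with identical `Addr` and `ServerName` but different `Attributes` occupy distinct entries (`addressMapDemo` is a hypothetical helper):

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func addressMapDemo() {
	m := resolver.NewAddressMap()
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "a")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "b")}

	m.Set(a1, "conn-1")
	m.Set(a2, "conn-2")

	// Same Addr and ServerName, but different Attributes: two distinct entries.
	fmt.Println(m.Len()) // 2
	v, ok := m.Get(a1)
	fmt.Println(v, ok) // conn-1 true
}
```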
-type endpointMapKey string
-
-// EndpointMap is a map of endpoints to arbitrary values keyed on only the
-// unordered set of address strings within an endpoint. This map is not
-// thread safe and must not be accessed concurrently. Must be created via
-// NewEndpointMap; do not construct directly.
-type EndpointMap struct {
- endpoints map[endpointMapKey]endpointData
-}
-
-type endpointData struct {
- // decodedKey stores the original key to avoid decoding when iterating on
- // EndpointMap keys.
- decodedKey Endpoint
- value any
-}
-
-// NewEndpointMap creates a new EndpointMap.
-func NewEndpointMap() *EndpointMap {
- return &EndpointMap{
- endpoints: make(map[endpointMapKey]endpointData),
- }
-}
-
-// encodeEndpoint returns a string that uniquely identifies the unordered set of
-// addresses within an endpoint.
-func encodeEndpoint(e Endpoint) endpointMapKey {
- addrs := make([]string, 0, len(e.Addresses))
- // base64 encoding the address strings restricts the characters present
- // within the strings. This allows us to use a delimiter without the need of
- // escape characters.
- for _, addr := range e.Addresses {
- addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr)))
- }
- sort.Strings(addrs)
- // " " should not appear in base64 encoded strings.
- return endpointMapKey(strings.Join(addrs, " "))
-}
-
-// Get returns the value for the endpoint in the map, if present.
-func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
- val, found := em.endpoints[encodeEndpoint(e)]
- if found {
- return val.value, true
- }
- return nil, false
-}
-
-// Set updates or adds the value for the endpoint in the map.
-func (em *EndpointMap) Set(e Endpoint, value any) {
- en := encodeEndpoint(e)
- em.endpoints[en] = endpointData{
- decodedKey: Endpoint{Addresses: e.Addresses},
- value: value,
- }
-}
-
-// Len returns the number of entries in the map.
-func (em *EndpointMap) Len() int {
- return len(em.endpoints)
-}
-
-// Keys returns a slice of all current map keys, as endpoints specifying the
-// addresses present in the endpoint keys, in which uniqueness is determined by
-// the unordered set of addresses. Thus, the endpoint information returned is
-// not the full endpoint data (duplicated addresses and attributes are
-// dropped), but it can be used for EndpointMap accesses.
-func (em *EndpointMap) Keys() []Endpoint {
- ret := make([]Endpoint, 0, len(em.endpoints))
- for _, en := range em.endpoints {
- ret = append(ret, en.decodedKey)
- }
- return ret
-}
-
-// Values returns a slice of all current map values.
-func (em *EndpointMap) Values() []any {
- ret := make([]any, 0, len(em.endpoints))
- for _, val := range em.endpoints {
- ret = append(ret, val.value)
- }
- return ret
-}
-
-// Delete removes the specified endpoint from the map.
-func (em *EndpointMap) Delete(e Endpoint) {
- en := encodeEndpoint(e)
- delete(em.endpoints, en)
-}
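A sketch of the order-insensitive keying described above: because the key is the sorted, base64-encoded set of address strings, two endpoints listing the same addresses in different order hit the same entry (`endpointMapDemo` is a hypothetical helper):

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func endpointMapDemo() {
	em := resolver.NewEndpointMap()
	e1 := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "a:1"}, {Addr: "b:2"}}}
	e2 := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "b:2"}, {Addr: "a:1"}}}

	em.Set(e1, "v")
	// Keying ignores address order, so e2 resolves to the same entry as e1.
	v, ok := em.Get(e2)
	fmt.Println(v, ok, em.Len()) // v true 1
}
```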
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
deleted file mode 100644
index b84ef26d4..000000000
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package resolver defines APIs for name resolution in gRPC.
-// All APIs in this package are experimental.
-package resolver
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "net/url"
- "strings"
-
- "google.golang.org/grpc/attributes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/serviceconfig"
-)
-
-var (
- // m is a map from scheme to resolver builder.
- m = make(map[string]Builder)
- // defaultScheme is the default scheme to use.
- defaultScheme = "passthrough"
-)
-
-// TODO(bar) install dns resolver in init(){}.
-
-// Register registers the resolver builder to the resolver map. b.Scheme will
-// be used as the scheme registered with this builder. The registry is case
-// sensitive, and schemes should not contain any uppercase characters.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Resolvers are
-// registered with the same name, the one registered last will take effect.
-func Register(b Builder) {
- m[b.Scheme()] = b
-}
-
-// Get returns the resolver builder registered with the given scheme.
-//
-// If no builder is registered with the scheme, nil will be returned.
-func Get(scheme string) Builder {
- if b, ok := m[scheme]; ok {
- return b
- }
- return nil
-}
-
-// SetDefaultScheme sets the default scheme that will be used. The default
-// scheme is initially set to "passthrough".
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. The scheme set last overrides
-// previously set values.
-func SetDefaultScheme(scheme string) {
- defaultScheme = scheme
- internal.UserSetDefaultScheme = true
-}
-
-// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If
-// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead.
-func GetDefaultScheme() string {
- return defaultScheme
-}
-
-// Address represents a server the client connects to.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type Address struct {
- // Addr is the server address on which a connection will be established.
- Addr string
-
- // ServerName is the name of this address.
- // If non-empty, the ServerName is used as the transport certification authority for
- // the address, instead of the hostname from the Dial target string. In most cases,
- // this should not be set.
- //
- // WARNING: ServerName must only be populated with trusted values. It
- // is insecure to populate it with data from untrusted inputs since untrusted
- // values could be used to bypass the authority checks performed by TLS.
- ServerName string
-
- // Attributes contains arbitrary data about this address intended for
- // consumption by the SubConn.
- Attributes *attributes.Attributes
-
- // BalancerAttributes contains arbitrary data about this address intended
- // for consumption by the LB policy. These attributes do not affect SubConn
- // creation, connection establishment, handshaking, etc.
- //
- // Deprecated: when an Address is inside an Endpoint, this field should not
- // be used, and it will eventually be removed entirely.
- BalancerAttributes *attributes.Attributes
-
- // Metadata is the information associated with Addr, which may be used
- // to make load balancing decision.
- //
- // Deprecated: use Attributes instead.
- Metadata any
-}
-
-// Equal returns whether a and o are identical. Metadata is compared directly,
-// not with any recursive introspection.
-//
-// This method compares all fields of the address. When used to tell apart
-// addresses during subchannel creation or connection establishment, it might be
-// more appropriate for the caller to implement custom equality logic.
-func (a Address) Equal(o Address) bool {
- return a.Addr == o.Addr && a.ServerName == o.ServerName &&
- a.Attributes.Equal(o.Attributes) &&
- a.BalancerAttributes.Equal(o.BalancerAttributes) &&
- a.Metadata == o.Metadata
-}
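A quick sketch of Equal's strict field-by-field semantics (hypothetical values):

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func equalDemo() {
	a := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("region", "us-east")}
	b := a
	b.Attributes = attributes.New("region", "us-west")
	fmt.Println(a.Equal(b)) // false: every field participates, including Attributes
	b.Attributes = attributes.New("region", "us-east")
	fmt.Println(a.Equal(b)) // true
}
```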
-
-// String returns a JSON-formatted string representation of the address.
-func (a Address) String() string {
- var sb strings.Builder
- sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
- sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
- if a.Attributes != nil {
- sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
- }
- if a.BalancerAttributes != nil {
- sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
- }
- sb.WriteString("}")
- return sb.String()
-}
-
-// BuildOptions includes additional information for the builder to create
-// the resolver.
-type BuildOptions struct {
- // DisableServiceConfig indicates whether a resolver implementation should
- // fetch service config data.
- DisableServiceConfig bool
- // DialCreds is the transport credentials used by the ClientConn for
- // communicating with the target gRPC service (set via
- // WithTransportCredentials). In cases where a name resolution service
- // requires the same credentials, the resolver may use this field. In most
- // cases though, it is not appropriate, and this field may be ignored.
- DialCreds credentials.TransportCredentials
- // CredsBundle is the credentials bundle used by the ClientConn for
- // communicating with the target gRPC service (set via
- // WithCredentialsBundle). In cases where a name resolution service
- // requires the same credentials, the resolver may use this field. In most
- // cases though, it is not appropriate, and this field may be ignored.
- CredsBundle credentials.Bundle
- // Dialer is the custom dialer used by the ClientConn for dialling the
- // target gRPC service (set via WithDialer). In cases where a name
- // resolution service requires the same dialer, the resolver may use this
- // field. In most cases though, it is not appropriate, and this field may
- // be ignored.
- Dialer func(context.Context, string) (net.Conn, error)
- // Authority is the effective authority of the clientconn for which the
- // resolver is built.
- Authority string
- // MetricsRecorder is the metrics recorder to do recording.
- MetricsRecorder stats.MetricsRecorder
-}
-
-// An Endpoint is one network endpoint, or server, which may have multiple
-// addresses with which it can be accessed.
-type Endpoint struct {
- // Addresses contains a list of addresses used to access this endpoint.
- Addresses []Address
-
- // Attributes contains arbitrary data about this endpoint intended for
- // consumption by the LB policy.
- Attributes *attributes.Attributes
-}
-
-// State contains the current Resolver state relevant to the ClientConn.
-type State struct {
- // Addresses is the latest set of resolved addresses for the target.
- //
- // If a resolver sets Addresses but does not set Endpoints, one Endpoint
- // will be created for each Address before the State is passed to the LB
- // policy. The BalancerAttributes of each entry in Addresses will be set
- // in Endpoints.Attributes, and be cleared in the Endpoint's Address's
- // BalancerAttributes.
- //
- // Soon, Addresses will be deprecated and replaced fully by Endpoints.
- Addresses []Address
-
- // Endpoints is the latest set of resolved endpoints for the target.
- //
- // If a resolver produces a State containing Endpoints but not Addresses,
- // it must take care to ensure the LB policies it selects will support
- // Endpoints.
- Endpoints []Endpoint
-
- // ServiceConfig contains the result from parsing the latest service
- // config. If it is nil, it indicates no service config is present or the
- // resolver does not provide service configs.
- ServiceConfig *serviceconfig.ParseResult
-
- // Attributes contains arbitrary data about the resolver intended for
- // consumption by the load balancing policy.
- Attributes *attributes.Attributes
-}
-
-// ClientConn contains the callbacks for resolver to notify any updates
-// to the gRPC ClientConn.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
-type ClientConn interface {
- // UpdateState updates the state of the ClientConn appropriately.
- //
- // If an error is returned, the resolver should try to resolve the
- // target again. The resolver should use a backoff timer to prevent
- // overloading the server with requests. If a resolver is certain that
- // reresolving will not change the result, e.g. because it is
- // a watch-based resolver, returned errors can be ignored.
- //
- // If the resolved State is the same as the last reported one, calling
- // UpdateState can be omitted.
- UpdateState(State) error
- // ReportError notifies the ClientConn that the Resolver encountered an
- // error. The ClientConn then forwards this error to the load balancing
- // policy.
- ReportError(error)
- // NewAddress is called by resolver to notify ClientConn a new list
- // of resolved addresses.
- // The address list should be the complete list of resolved addresses.
- //
- // Deprecated: Use UpdateState instead.
- NewAddress(addresses []Address)
- // ParseServiceConfig parses the provided service config and returns an
- // object that provides the parsed config.
- ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
-}
-
-// Target represents a target for gRPC, as specified in:
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// It is parsed from the target string that gets passed into Dial or DialContext
-// by the user. gRPC then passes it to the resolver and the balancer.
-//
-// If the target follows the naming spec, and the parsed scheme is registered
-// with gRPC, we will parse the target string according to the spec. If the
-// target does not contain a scheme or if the parsed scheme is not registered
-// (i.e. no corresponding resolver available to resolve the endpoint), we will
-// apply the default scheme, and will attempt to reparse it.
-type Target struct {
- // URL contains the parsed dial target with an optional default scheme added
- // to it if the original dial target contained no scheme or contained an
- // unregistered scheme. Any query params specified in the original dial
- // target can be accessed from here.
- URL url.URL
-}
-
-// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
-// or `URL.Opaque`. The latter is used when the former is empty.
-func (t Target) Endpoint() string {
- endpoint := t.URL.Path
- if endpoint == "" {
- endpoint = t.URL.Opaque
- }
- // For targets of the form "[scheme]://[authority]/endpoint", the endpoint
- // value returned from url.Parse() contains a leading "/". Although this is
- // in accordance with RFC 3986, we do not want to break existing resolver
- // implementations which expect the endpoint without the leading "/". So, we
- // end up stripping the leading "/" here. But this will result in an
- // incorrect parsing for something like "unix:///path/to/socket". Since we
- // own the "unix" resolver, we can workaround in the unix resolver by using
- // the `URL` field.
- return strings.TrimPrefix(endpoint, "/")
-}
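A sketch of the stripping behavior described above; gRPC normally builds the Target from the dial string itself, so parsing by hand here is purely illustrative:

```go
package example

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/resolver"
)

func targetDemo() {
	u, _ := url.Parse("dns://8.8.8.8/example.com:50051")
	t := resolver.Target{URL: *u}
	// url.Parse leaves the endpoint as "/example.com:50051"; Endpoint()
	// strips the leading "/".
	fmt.Println(t.Endpoint()) // example.com:50051
	fmt.Println(t.String())   // dns://8.8.8.8/example.com:50051
}
```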
-
-// String returns the canonical string representation of Target.
-func (t Target) String() string {
- return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint()
-}
-
-// Builder creates a resolver that will be used to watch name resolution updates.
-type Builder interface {
- // Build creates a new resolver for the given target.
- //
- // gRPC dial calls Build synchronously, and fails if the returned error is
- // not nil.
- Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
- // Scheme returns the scheme supported by this resolver. Scheme is defined
- // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
- // string should not contain uppercase characters, as they will not match
- // the parsed target's scheme as defined in RFC 3986.
- Scheme() string
-}
-
-// ResolveNowOptions includes additional information for ResolveNow.
-type ResolveNowOptions struct{}
-
-// Resolver watches for the updates on the specified target.
-// Updates include address updates and service config updates.
-type Resolver interface {
- // ResolveNow will be called by gRPC to try to resolve the target name
- // again. It's just a hint; the resolver can ignore it if it's not necessary.
- //
- // It could be called multiple times concurrently.
- ResolveNow(ResolveNowOptions)
- // Close closes the resolver.
- Close()
-}
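To make the Builder/Resolver contract concrete, here is a hypothetical static resolver that reports a fixed address list; the `static` scheme and all names are illustrative, not part of gRPC:

```go
package example

import "google.golang.org/grpc/resolver"

// staticBuilder resolves every target of its scheme to a fixed address list.
// A minimal sketch, not production code.
type staticBuilder struct {
	addrs []string
}

func (b *staticBuilder) Scheme() string { return "static" }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc, addrs: b.addrs}
	r.ResolveNow(resolver.ResolveNowOptions{}) // push the initial state
	return r, nil
}

type staticResolver struct {
	cc    resolver.ClientConn
	addrs []string
}

func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) {
	addresses := make([]resolver.Address, len(r.addrs))
	for i, a := range r.addrs {
		addresses[i] = resolver.Address{Addr: a}
	}
	// An error here would normally drive a backoff; ignored in this sketch.
	_ = r.cc.UpdateState(resolver.State{Addresses: addresses})
}

func (r *staticResolver) Close() {}

func init() {
	// Register must run before any gRPC calls, e.g. in an init function.
	resolver.Register(&staticBuilder{addrs: []string{"10.0.0.1:50051", "10.0.0.2:50051"}})
}
```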
-
-// AuthorityOverrider is implemented by Builders that wish to override the
-// default authority for the ClientConn.
-// By default, the authority used is target.Endpoint().
-type AuthorityOverrider interface {
- // OverrideAuthority returns the authority to use for a ClientConn with the
- // given target. The implementation must generate it without blocking,
- // typically inline, and must keep it unchanged.
- OverrideAuthority(Target) string
-}
-
-// ValidateEndpoints validates endpoints from a petiole policy's perspective.
-// Petiole policies should call this before calling into their children. See
-// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md)
-// for details.
-func ValidateEndpoints(endpoints []Endpoint) error {
- if len(endpoints) == 0 {
- return errors.New("endpoints list is empty")
- }
-
- for _, endpoint := range endpoints {
- for range endpoint.Addresses {
- return nil
- }
- }
- return errors.New("endpoints list contains no addresses")
-}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
deleted file mode 100644
index 80e16a327..000000000
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "strings"
- "sync"
-
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/internal/resolver/delegatingresolver"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-// ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConn interface.
-type ccResolverWrapper struct {
- // The following fields are initialized when the wrapper is created and are
- // read-only afterwards, and therefore can be accessed without a mutex.
- cc *ClientConn
- ignoreServiceConfig bool
- serializer *grpcsync.CallbackSerializer
- serializerCancel context.CancelFunc
-
- resolver resolver.Resolver // only accessed within the serializer
-
- // The following fields are protected by mu. Caller must take cc.mu before
- // taking mu.
- mu sync.Mutex
- curState resolver.State
- closed bool
-}
-
-// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used
-// after calling start, which builds the resolver.
-func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
- ctx, cancel := context.WithCancel(cc.ctx)
- return &ccResolverWrapper{
- cc: cc,
- ignoreServiceConfig: cc.dopts.disableServiceConfig,
- serializer: grpcsync.NewCallbackSerializer(ctx),
- serializerCancel: cancel,
- }
-}
-
-// start builds the name resolver using the resolver.Builder in cc and returns
-// any error encountered. It must always be the first operation performed on
-// any newly created ccResolverWrapper, except that close may be called instead.
-func (ccr *ccResolverWrapper) start() error {
- errCh := make(chan error)
- ccr.serializer.TrySchedule(func(ctx context.Context) {
- if ctx.Err() != nil {
- return
- }
- opts := resolver.BuildOptions{
- DisableServiceConfig: ccr.cc.dopts.disableServiceConfig,
- DialCreds: ccr.cc.dopts.copts.TransportCredentials,
- CredsBundle: ccr.cc.dopts.copts.CredsBundle,
- Dialer: ccr.cc.dopts.copts.Dialer,
- Authority: ccr.cc.authority,
- MetricsRecorder: ccr.cc.metricsRecorderList,
- }
- var err error
- // The delegating resolver is used unless:
- // - A custom dialer is provided via the WithContextDialer dial option, or
- // - Proxy usage is disabled through the WithNoProxy dial option.
- // In these cases, the resolver is built based on the scheme of target,
- // using the appropriate resolver builder.
- if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy {
- ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
- } else {
- ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution)
- }
- errCh <- err
- })
- return <-errCh
-}
-
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.serializer.TrySchedule(func(ctx context.Context) {
- if ctx.Err() != nil || ccr.resolver == nil {
- return
- }
- ccr.resolver.ResolveNow(o)
- })
-}
-
-// close initiates async shutdown of the wrapper. To determine that the
-// wrapper has finished shutting down, the caller should block on
-// ccr.serializer.Done() without cc.mu held.
-func (ccr *ccResolverWrapper) close() {
- channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver")
- ccr.mu.Lock()
- ccr.closed = true
- ccr.mu.Unlock()
-
- ccr.serializer.TrySchedule(func(context.Context) {
- if ccr.resolver == nil {
- return
- }
- ccr.resolver.Close()
- ccr.resolver = nil
- })
- ccr.serializerCancel()
-}
-
-// UpdateState is called by resolver implementations to report new state,
-// which includes addresses and service config, to gRPC.
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
- ccr.cc.mu.Lock()
- ccr.mu.Lock()
- if ccr.closed {
- ccr.mu.Unlock()
- ccr.cc.mu.Unlock()
- return nil
- }
- if s.Endpoints == nil {
- s.Endpoints = addressesToEndpoints(s.Addresses)
- }
- ccr.addChannelzTraceEvent(s)
- ccr.curState = s
- ccr.mu.Unlock()
- return ccr.cc.updateResolverStateAndUnlock(s, nil)
-}
-
-// ReportError is called by resolver implementations to report errors
-// encountered during name resolution to gRPC.
-func (ccr *ccResolverWrapper) ReportError(err error) {
- ccr.cc.mu.Lock()
- ccr.mu.Lock()
- if ccr.closed {
- ccr.mu.Unlock()
- ccr.cc.mu.Unlock()
- return
- }
- ccr.mu.Unlock()
- channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err)
- ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
-}
-
-// NewAddress is called by the resolver implementation to send addresses to
-// gRPC.
-func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- ccr.cc.mu.Lock()
- ccr.mu.Lock()
- if ccr.closed {
- ccr.mu.Unlock()
- ccr.cc.mu.Unlock()
- return
- }
- s := resolver.State{
- Addresses: addrs,
- ServiceConfig: ccr.curState.ServiceConfig,
- Endpoints: addressesToEndpoints(addrs),
- }
- ccr.addChannelzTraceEvent(s)
- ccr.curState = s
- ccr.mu.Unlock()
- ccr.cc.updateResolverStateAndUnlock(s, nil)
-}
-
-// ParseServiceConfig is called by resolver implementations to parse a JSON
-// representation of the service config.
-func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
- return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
-}
-
-// addChannelzTraceEvent adds a channelz trace event containing the new
-// state received from resolver implementations.
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
- if !logger.V(0) && !channelz.IsOn() {
- return
- }
- var updates []string
- var oldSC, newSC *ServiceConfig
- var oldOK, newOK bool
- if ccr.curState.ServiceConfig != nil {
- oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
- }
- if s.ServiceConfig != nil {
- newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
- }
- if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
- updates = append(updates, "service config updated")
- }
- if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
- updates = append(updates, "resolver returned an empty address list")
- } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
- updates = append(updates, "resolver returned new addresses")
- }
- channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
-}
-
-func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint {
- endpoints := make([]resolver.Endpoint, 0, len(addrs))
- for _, a := range addrs {
- ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
- ep.Addresses[0].BalancerAttributes = nil
- endpoints = append(endpoints, ep)
- }
- return endpoints
-}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
deleted file mode 100644
index ad20e9dff..000000000
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ /dev/null
@@ -1,1055 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "compress/gzip"
- "context"
- "encoding/binary"
- "fmt"
- "io"
- "math"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/encoding"
- "google.golang.org/grpc/encoding/proto"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-// Compressor defines the interface gRPC uses to compress a message.
-//
-// Deprecated: use package encoding.
-type Compressor interface {
- // Do compresses p into w.
- Do(w io.Writer, p []byte) error
- // Type returns the compression algorithm the Compressor uses.
- Type() string
-}
-
-type gzipCompressor struct {
- pool sync.Pool
-}
-
-// NewGZIPCompressor creates a Compressor based on GZIP.
-//
-// Deprecated: use package encoding/gzip.
-func NewGZIPCompressor() Compressor {
- c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
- return c
-}
-
-// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
-// of assuming DefaultCompression.
-//
-// The error returned will be nil if the level is valid.
-//
-// Deprecated: use package encoding/gzip.
-func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
- if level < gzip.DefaultCompression || level > gzip.BestCompression {
- return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
- }
- return &gzipCompressor{
- pool: sync.Pool{
- New: func() any {
- w, err := gzip.NewWriterLevel(io.Discard, level)
- if err != nil {
- panic(err)
- }
- return w
- },
- },
- }, nil
-}
-
-func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
- z := c.pool.Get().(*gzip.Writer)
- defer c.pool.Put(z)
- z.Reset(w)
- if _, err := z.Write(p); err != nil {
- return err
- }
- return z.Close()
-}
-
-func (c *gzipCompressor) Type() string {
- return "gzip"
-}
-
-// Decompressor defines the interface gRPC uses to decompress a message.
-//
-// Deprecated: use package encoding.
-type Decompressor interface {
- // Do reads the data from r and uncompresses it.
- Do(r io.Reader) ([]byte, error)
- // Type returns the compression algorithm the Decompressor uses.
- Type() string
-}
-
-type gzipDecompressor struct {
- pool sync.Pool
-}
-
-// NewGZIPDecompressor creates a Decompressor based on GZIP.
-//
-// Deprecated: use package encoding/gzip.
-func NewGZIPDecompressor() Decompressor {
- return &gzipDecompressor{}
-}
-
-func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
- var z *gzip.Reader
- switch maybeZ := d.pool.Get().(type) {
- case nil:
- newZ, err := gzip.NewReader(r)
- if err != nil {
- return nil, err
- }
- z = newZ
- case *gzip.Reader:
- z = maybeZ
- if err := z.Reset(r); err != nil {
- d.pool.Put(z)
- return nil, err
- }
- }
-
- defer func() {
- z.Close()
- d.pool.Put(z)
- }()
- return io.ReadAll(z)
-}
-
-func (d *gzipDecompressor) Type() string {
- return "gzip"
-}
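A round-trip sketch of the (deprecated) Compressor/Decompressor contract shown above:

```go
package example

import (
	"bytes"
	"fmt"

	"google.golang.org/grpc"
)

func gzipRoundTrip() error {
	c := grpc.NewGZIPCompressor()   // deprecated API, shown only to
	d := grpc.NewGZIPDecompressor() // illustrate the contract
	var buf bytes.Buffer
	if err := c.Do(&buf, []byte("hello gzip")); err != nil {
		return err
	}
	out, err := d.Do(&buf)
	if err != nil {
		return err
	}
	fmt.Println(string(out)) // hello gzip
	return nil
}
```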
-
-// callInfo contains all related configuration and information about an RPC.
-type callInfo struct {
- compressorName string
- failFast bool
- maxReceiveMessageSize *int
- maxSendMessageSize *int
- creds credentials.PerRPCCredentials
- contentSubtype string
- codec baseCodec
- maxRetryRPCBufferSize int
- onFinish []func(err error)
-}
-
-func defaultCallInfo() *callInfo {
- return &callInfo{
- failFast: true,
- maxRetryRPCBufferSize: 256 * 1024, // 256KB
- }
-}
-
-// CallOption configures a Call before it starts or extracts information from
-// a Call after it completes.
-type CallOption interface {
- // before is called before the call is sent to any server. If before
- // returns a non-nil error, the RPC fails with that error.
- before(*callInfo) error
-
- // after is called after the call has completed. after cannot return an
- // error, so any failures should be reported via output parameters.
- after(*callInfo, *csAttempt)
-}
-
-// EmptyCallOption does not alter the Call configuration.
-// It can be embedded in another structure to carry satellite data for use
-// by interceptors.
-type EmptyCallOption struct{}
-
-func (EmptyCallOption) before(*callInfo) error { return nil }
-func (EmptyCallOption) after(*callInfo, *csAttempt) {}
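A common pattern enabled by the embedding note above: a hypothetical CallOption that carries data for an interceptor without altering call behavior (`traceIDCallOption` and the interceptor are illustrative names):

```go
package example

import (
	"context"

	"google.golang.org/grpc"
)

// traceIDCallOption carries satellite data. Embedding EmptyCallOption
// satisfies the CallOption interface with no-op before/after hooks; only
// code that looks for the concrete type sees the payload.
type traceIDCallOption struct {
	grpc.EmptyCallOption
	id string
}

// unaryTraceInterceptor fishes the option back out of the CallOption list.
func unaryTraceInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	for _, o := range opts {
		if t, ok := o.(traceIDCallOption); ok {
			_ = t.id // e.g. attach to outgoing metadata here
		}
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}
```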
-
-// StaticMethod returns a CallOption which specifies that a call is being made
-// to a method that is static, which means the method is known at compile time
-// and doesn't change at runtime. This can be used as a signal to stats plugins
-// that this method is safe to include as a key to a measurement.
-func StaticMethod() CallOption {
- return StaticMethodCallOption{}
-}
-
-// StaticMethodCallOption is a CallOption that specifies that a call comes
-// from a static method.
-type StaticMethodCallOption struct {
- EmptyCallOption
-}
-
-// Header returns a CallOption that retrieves the header metadata
-// for a unary RPC.
-func Header(md *metadata.MD) CallOption {
- return HeaderCallOption{HeaderAddr: md}
-}
-
-// HeaderCallOption is a CallOption for collecting response header metadata.
-// The metadata field will be populated *after* the RPC completes.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type HeaderCallOption struct {
- HeaderAddr *metadata.MD
-}
-
-func (o HeaderCallOption) before(*callInfo) error { return nil }
-func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
- *o.HeaderAddr, _ = attempt.transportStream.Header()
-}
-
-// Trailer returns a CallOption that retrieves the trailer metadata
-// for a unary RPC.
-func Trailer(md *metadata.MD) CallOption {
- return TrailerCallOption{TrailerAddr: md}
-}
-
-// TrailerCallOption is a CallOption for collecting response trailer metadata.
-// The metadata field will be populated *after* the RPC completes.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type TrailerCallOption struct {
- TrailerAddr *metadata.MD
-}
-
-func (o TrailerCallOption) before(*callInfo) error { return nil }
-func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
- *o.TrailerAddr = attempt.transportStream.Trailer()
-}
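A hypothetical call site for Header and Trailer; the method name is a placeholder, and both maps are populated only after the RPC completes:

```go
package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// callWithMetadata invokes a unary RPC and collects its response metadata.
func callWithMetadata(ctx context.Context, cc *grpc.ClientConn, req, reply any) (metadata.MD, metadata.MD, error) {
	var hdr, trl metadata.MD
	err := cc.Invoke(ctx, "/pkg.Service/Method", req, reply,
		grpc.Header(&hdr), grpc.Trailer(&trl))
	return hdr, trl, err
}
```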
-
-// Peer returns a CallOption that retrieves peer information for a unary RPC.
-// The peer field will be populated *after* the RPC completes.
-func Peer(p *peer.Peer) CallOption {
- return PeerCallOption{PeerAddr: p}
-}
-
-// PeerCallOption is a CallOption for collecting the identity of the remote
-// peer. The peer field will be populated *after* the RPC completes.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type PeerCallOption struct {
- PeerAddr *peer.Peer
-}
-
-func (o PeerCallOption) before(*callInfo) error { return nil }
-func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
- if x, ok := peer.FromContext(attempt.transportStream.Context()); ok {
- *o.PeerAddr = *x
- }
-}
-
-// WaitForReady configures the RPC's behavior when the client is in
-// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
-// waitForReady is false, the RPC will fail immediately. Otherwise, the client
-// will wait until a connection becomes available or the RPC's deadline is
-// reached.
-//
-// By default, RPCs do not "wait for ready".
-func WaitForReady(waitForReady bool) CallOption {
- return FailFastCallOption{FailFast: !waitForReady}
-}
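A hypothetical wait-for-ready call, bounded by a context deadline so it cannot queue forever (the method name is a placeholder):

```go
package example

import (
	"context"
	"time"

	"google.golang.org/grpc"
)

// waitForReadyCall queues the RPC through TRANSIENT_FAILURE instead of
// failing fast, up to the 5-second deadline.
func waitForReadyCall(cc *grpc.ClientConn, req, reply any) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return cc.Invoke(ctx, "/pkg.Service/Method", req, reply, grpc.WaitForReady(true))
}
```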
-
-// FailFast is the opposite of WaitForReady.
-//
-// Deprecated: use WaitForReady.
-func FailFast(failFast bool) CallOption {
- return FailFastCallOption{FailFast: failFast}
-}
-
-// FailFastCallOption is a CallOption for indicating whether an RPC should fail
-// fast or not.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type FailFastCallOption struct {
- FailFast bool
-}
-
-func (o FailFastCallOption) before(c *callInfo) error {
- c.failFast = o.FailFast
- return nil
-}
-func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
-
-// OnFinish returns a CallOption that configures a callback to be called when
-// the call completes. The error passed to the callback is the status of the
-// RPC, and may be nil. The onFinish callback provided will only be called once
-// by gRPC. This is mainly used by streaming interceptors, to be notified
-// when the RPC completes, along with information about the status of the
-// RPC.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func OnFinish(onFinish func(err error)) CallOption {
- return OnFinishCallOption{
- OnFinish: onFinish,
- }
-}
-
-// OnFinishCallOption is CallOption that indicates a callback to be called when
-// the call completes.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type OnFinishCallOption struct {
- OnFinish func(error)
-}
-
-func (o OnFinishCallOption) before(c *callInfo) error {
- c.onFinish = append(c.onFinish, o.OnFinish)
- return nil
-}
-
-func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
-
-// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
-// in bytes the client can receive. If this is not set, gRPC uses the default
-// 4MB.
-func MaxCallRecvMsgSize(bytes int) CallOption {
- return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
-}
-
-// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
-// size in bytes the client can receive.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type MaxRecvMsgSizeCallOption struct {
- MaxRecvMsgSize int
-}
-
-func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
- c.maxReceiveMessageSize = &o.MaxRecvMsgSize
- return nil
-}
-func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
-
-// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
-// in bytes the client can send. If this is not set, gRPC uses the default
-// `math.MaxInt32`.
-func MaxCallSendMsgSize(bytes int) CallOption {
- return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
-}
-
-// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
-// size in bytes the client can send.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type MaxSendMsgSizeCallOption struct {
- MaxSendMsgSize int
-}
-
-func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
- c.maxSendMessageSize = &o.MaxSendMsgSize
- return nil
-}
-func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
-
-// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
-// for a call.
-func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
- return PerRPCCredsCallOption{Creds: creds}
-}
-
-// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
-// credentials to use for the call.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type PerRPCCredsCallOption struct {
- Creds credentials.PerRPCCredentials
-}
-
-func (o PerRPCCredsCallOption) before(c *callInfo) error {
- c.creds = o.Creds
- return nil
-}
-func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
-
-// UseCompressor returns a CallOption which sets the compressor used when
-// sending the request. If WithCompressor is also set, UseCompressor has
-// higher priority.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func UseCompressor(name string) CallOption {
- return CompressorCallOption{CompressorType: name}
-}
-
-// CompressorCallOption is a CallOption that indicates the compressor to use.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type CompressorCallOption struct {
- CompressorType string
-}
-
-func (o CompressorCallOption) before(c *callInfo) error {
- c.compressorName = o.CompressorType
- return nil
-}
-func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
-
-// CallContentSubtype returns a CallOption that will set the content-subtype
-// for a call. For example, if content-subtype is "json", the Content-Type over
-// the wire will be "application/grpc+json". The content-subtype is converted
-// to lowercase before being included in Content-Type. See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If ForceCodec is not also used, the content-subtype will be used to look up
-// the Codec to use in the registry controlled by RegisterCodec. See the
-// documentation on RegisterCodec for details on registration. The lookup of
-// content-subtype is case-insensitive. If no such Codec is found, the call
-// will result in an error with code codes.Internal.
-//
-// If ForceCodec is also used, that Codec will be used for all request and
-// response messages, with the content-subtype set to the given contentSubtype
-// here for requests.
-func CallContentSubtype(contentSubtype string) CallOption {
- return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
-}
-
-// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
-// used for marshaling messages.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type ContentSubtypeCallOption struct {
- ContentSubtype string
-}
-
-func (o ContentSubtypeCallOption) before(c *callInfo) error {
- c.contentSubtype = o.ContentSubtype
- return nil
-}
-func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
-
-// ForceCodec returns a CallOption that will set codec to be used for all
-// request and response messages for a call. The result of calling Name() will
-// be used as the content-subtype after converting to lowercase, unless
-// CallContentSubtype is also used.
-//
-// See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details. Also see the documentation on RegisterCodec and
-// CallContentSubtype for more details on the interaction between Codec and
-// content-subtype.
-//
-// This function is provided for advanced users; prefer to use only
-// CallContentSubtype to select a registered codec instead.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ForceCodec(codec encoding.Codec) CallOption {
- return ForceCodecCallOption{Codec: codec}
-}
-
-// ForceCodecCallOption is a CallOption that indicates the codec used for
-// marshaling messages.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type ForceCodecCallOption struct {
- Codec encoding.Codec
-}
-
-func (o ForceCodecCallOption) before(c *callInfo) error {
- c.codec = newCodecV1Bridge(o.Codec)
- return nil
-}
-func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
-
-// ForceCodecV2 returns a CallOption that will set codec to be used for all
-// request and response messages for a call. The result of calling Name() will
-// be used as the content-subtype after converting to lowercase, unless
-// CallContentSubtype is also used.
-//
-// See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details. Also see the documentation on RegisterCodec and
-// CallContentSubtype for more details on the interaction between Codec and
-// content-subtype.
-//
-// This function is provided for advanced users; prefer to use only
-// CallContentSubtype to select a registered codec instead.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ForceCodecV2(codec encoding.CodecV2) CallOption {
- return ForceCodecV2CallOption{CodecV2: codec}
-}
-
-// ForceCodecV2CallOption is a CallOption that indicates the codec used for
-// marshaling messages.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type ForceCodecV2CallOption struct {
- CodecV2 encoding.CodecV2
-}
-
-func (o ForceCodecV2CallOption) before(c *callInfo) error {
- c.codec = o.CodecV2
- return nil
-}
-
-func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
-
-// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
-// an encoding.Codec.
-//
-// Deprecated: use ForceCodec instead.
-func CallCustomCodec(codec Codec) CallOption {
- return CustomCodecCallOption{Codec: codec}
-}
-
-// CustomCodecCallOption is a CallOption that indicates the codec used for
-// marshaling messages.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type CustomCodecCallOption struct {
- Codec Codec
-}
-
-func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = newCodecV0Bridge(o.Codec)
- return nil
-}
-func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
-
-// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
-// used for buffering this RPC's requests for retry purposes.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func MaxRetryRPCBufferSize(bytes int) CallOption {
- return MaxRetryRPCBufferSizeCallOption{bytes}
-}
-
-// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
-// memory to be used for caching this RPC for retry purposes.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type MaxRetryRPCBufferSizeCallOption struct {
- MaxRetryRPCBufferSize int
-}
-
-func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
- c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
- return nil
-}
-func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
-
-// The format of the payload: compressed or not?
-type payloadFormat uint8
-
-const (
- compressionNone payloadFormat = 0 // no compression
- compressionMade payloadFormat = 1 // compressed
-)
-
-func (pf payloadFormat) isCompressed() bool {
- return pf == compressionMade
-}
-
-type streamReader interface {
- ReadMessageHeader(header []byte) error
- Read(n int) (mem.BufferSlice, error)
-}
-
-// parser reads complete gRPC messages from the underlying reader.
-type parser struct {
- // r is the underlying reader.
- // See the comment on recvMsg for the permissible
- // error types.
- r streamReader
-
- // The header of a gRPC message. Find more detail at
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
- header [5]byte
-
- // bufferPool is the pool of shared receive buffers.
- bufferPool mem.BufferPool
-}
-
-// recvMsg reads a complete gRPC message from the stream.
-//
-// It returns the message and its payload format (compression/encoding).
-// The caller owns the returned msg memory.
-//
-// If there is an error, possible values are:
-// - io.EOF, when no messages remain
-// - io.ErrUnexpectedEOF
-// - of type transport.ConnectionError
-// - an error from the status package
-//
-// No other error values or types must be returned, which also means
-// that the underlying streamReader must not return an incompatible
-// error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
- err := p.r.ReadMessageHeader(p.header[:])
- if err != nil {
- return 0, nil, err
- }
-
- pf := payloadFormat(p.header[0])
- length := binary.BigEndian.Uint32(p.header[1:])
-
- if int64(length) > int64(maxInt) {
- return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
- }
- if int(length) > maxReceiveMessageSize {
- return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
- }
-
- data, err := p.r.Read(int(length))
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return 0, nil, err
- }
- return pf, data, nil
-}
-
-// encode serializes msg and returns a buffer containing the message, or an
-// error if it is too large to be transmitted by grpc. If msg is nil, it
-// generates an empty message.
-func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
- if msg == nil { // NOTE: typed nils will not be caught by this check
- return nil, nil
- }
- b, err := c.Marshal(msg)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
- }
- if bufSize := uint(b.Len()); bufSize > math.MaxUint32 {
- b.Free()
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize)
- }
- return b, nil
-}
-
-// compress returns the input bytes compressed by compressor or cp.
-// If both compressors are nil, or if the message has zero length, returns nil,
-// indicating no compression was done.
-//
-// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
- if (compressor == nil && cp == nil) || in.Len() == 0 {
- return nil, compressionNone, nil
- }
- var out mem.BufferSlice
- w := mem.NewWriter(&out, pool)
- wrapErr := func(err error) error {
- out.Free()
- return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
- }
- if compressor != nil {
- z, err := compressor.Compress(w)
- if err != nil {
- return nil, 0, wrapErr(err)
- }
- for _, b := range in {
- if _, err := z.Write(b.ReadOnlyData()); err != nil {
- return nil, 0, wrapErr(err)
- }
- }
- if err := z.Close(); err != nil {
- return nil, 0, wrapErr(err)
- }
- } else {
- // This is obviously really inefficient since it fully materializes the data, but
- // there is no way around this with the old Compressor API. At least it attempts
- // to return the buffer to the pool, in the hopes it can be reused (maybe
- // even by a subsequent call to this very function).
- buf := in.MaterializeToBuffer(pool)
- defer buf.Free()
- if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
- return nil, 0, wrapErr(err)
- }
- }
- return out, compressionMade, nil
-}
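-
-// For illustration only (not part of the original file): a minimal sketch of
-// the newer encoding.Compressor API that the compressor parameter above
-// expects, backed by compress/gzip and similar in spirit to grpc-go's own
-// encoding/gzip package:
-//
-//	import (
-//		"compress/gzip"
-//		"io"
-//
-//		"google.golang.org/grpc/encoding"
-//	)
-//
-//	type gzipCompressor struct{}
-//
-//	func (gzipCompressor) Name() string { return "gzip" }
-//
-//	func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
-//		return gzip.NewWriter(w), nil
-//	}
-//
-//	func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
-//		return gzip.NewReader(r)
-//	}
-//
-//	func init() { encoding.RegisterCompressor(gzipCompressor{}) }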
-
-const (
- payloadLen = 1
- sizeLen = 4
- headerLen = payloadLen + sizeLen
-)
-
-// msgHeader returns a 5-byte header for the message being transmitted and the
-// payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
- hdr = make([]byte, headerLen)
- hdr[0] = byte(pf)
-
- var length uint32
- if pf.isCompressed() {
- length = uint32(compData.Len())
- payload = compData
- } else {
- length = uint32(data.Len())
- payload = data
- }
-
- // Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], length)
- return hdr, payload
-}
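-
-// For illustration only (not part of the original file): the 5-byte framing
-// that recvMsg and msgHeader implement, sketched with a plain []byte payload
-// instead of a mem.BufferSlice:
-//
-//	import "encoding/binary"
-//
-//	// frame prefixes msg with the compression flag and its big-endian length.
-//	func frame(msg []byte, compressed bool) []byte {
-//		hdr := make([]byte, 5) // 1 flag byte + 4 length bytes
-//		if compressed {
-//			hdr[0] = 1 // compressionMade
-//		}
-//		binary.BigEndian.PutUint32(hdr[1:], uint32(len(msg)))
-//		return append(hdr, msg...)
-//	}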
-
-func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
- return &stats.OutPayload{
- Client: client,
- Payload: msg,
- Length: dataLength,
- WireLength: payloadLength + headerLen,
- CompressedLength: payloadLength,
- SentTime: t,
- }
-}
-
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
- switch pf {
- case compressionNone:
- case compressionMade:
- if recvCompress == "" || recvCompress == encoding.Identity {
- return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
- }
- if !haveCompressor {
- if isServer {
- return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
- }
- return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
- }
- default:
- return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
- }
- return nil
-}
-
-type payloadInfo struct {
- compressedLength int // The compressed length received from the wire.
- uncompressedBytes mem.BufferSlice
-}
-
-func (p *payloadInfo) free() {
- if p != nil && p.uncompressedBytes != nil {
- p.uncompressedBytes.Free()
- }
-}
-
-// recvAndDecompress reads a message from the stream, decompressing it if necessary.
-//
-// The caller must call Free on the returned buffer once it is no longer
-// needed, so the underlying memory can be returned to the pool.
-// TODO: Refactor this function to reduce the number of arguments.
-// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
-func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
-) (out mem.BufferSlice, err error) {
- pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
- if err != nil {
- return nil, err
- }
-
- compressedLength := compressed.Len()
-
- if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
- compressed.Free()
- return nil, st.Err()
- }
-
- if pf.isCompressed() {
- defer compressed.Free()
- // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
- // use this decompressor as the default.
- out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool)
- if err != nil {
- return nil, err
- }
- } else {
- out = compressed
- }
-
- if payInfo != nil {
- payInfo.compressedLength = compressedLength
- out.Ref()
- payInfo.uncompressedBytes = out
- }
-
- return out, nil
-}
-
-// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor.
-// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data
-// does not exceed the specified maximum size and returns an error if this limit is exceeded.
-// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit.
-func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) {
- if dc != nil {
- uncompressed, err := dc.Do(d.Reader())
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
- }
- if len(uncompressed) > maxReceiveMessageSize {
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize)
- }
- return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil
- }
- if compressor != nil {
- dcReader, err := compressor.Decompress(d.Reader())
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
- }
-
- // Read at most one byte more than the limit from the decompressor.
- // Unless the limit is MaxInt64, in which case, that's impossible, so
- // apply no limit.
- if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 {
- dcReader = io.LimitReader(dcReader, limit+1)
- }
- out, err := mem.ReadAll(dcReader, pool)
- if err != nil {
- out.Free()
- return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
- }
-
- if out.Len() > maxReceiveMessageSize {
- out.Free()
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
- }
- return out, nil
- }
- return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
-}
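-
-// For illustration only (not part of the original file): the limit+1 trick
-// used above, in isolation. Reading one byte more than the cap makes "at the
-// limit" and "over the limit" distinguishable without reading an unbounded
-// stream into memory:
-//
-//	import (
-//		"fmt"
-//		"io"
-//	)
-//
-//	func readCapped(r io.Reader, limit int64) ([]byte, error) {
-//		data, err := io.ReadAll(io.LimitReader(r, limit+1))
-//		if err != nil {
-//			return nil, err
-//		}
-//		if int64(len(data)) > limit {
-//			return nil, fmt.Errorf("stream exceeds %d bytes", limit)
-//		}
-//		return data, nil
-//	}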
-
-type recvCompressor interface {
- RecvCompress() string
-}
-
-// Of the two decompressor parameters (dc and compressor), at most one should
-// be set; if both are, dc takes precedence over compressor.
-// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
- data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
- if err != nil {
- return err
- }
-
- // If the codec wants its own reference to the data, it can get it. Otherwise, always
- // free the buffers.
- defer data.Free()
-
- if err := c.Unmarshal(data, m); err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
- }
-
- return nil
-}
-
-// rpcInfo carries per-RPC information.
-type rpcInfo struct {
- failfast bool
- preloaderInfo *compressorInfo
-}
-
-// compressorInfo stores the codec and compressors used by an RPC.
-// If a stream's context carries an rpcInfo with non-nil codec and compressor
-// pointers, a PreparedMsg can marshal (and compress) a message asynchronously
-// and reuse the marshalled bytes.
-type compressorInfo struct {
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
-}
-
-type rpcInfoContextKey struct{}
-
-func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
- return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
- failfast: failfast,
- preloaderInfo: &compressorInfo{
- codec: codec,
- cp: cp,
- comp: comp,
- },
- })
-}
-
-func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
- s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
- return
-}
-
-// Code returns the error code for err if it was produced by the rpc system.
-// Otherwise, it returns codes.Unknown.
-//
-// Deprecated: use status.Code instead.
-func Code(err error) codes.Code {
- return status.Code(err)
-}
-
-// ErrorDesc returns the error description of err if it was produced by the rpc system.
-// Otherwise, it returns err.Error() or empty string when err is nil.
-//
-// Deprecated: use status.Convert and Message method instead.
-func ErrorDesc(err error) string {
- return status.Convert(err).Message()
-}
-
-// Errorf returns an error containing an error code and a description;
-// Errorf returns nil if c is OK.
-//
-// Deprecated: use status.Errorf instead.
-func Errorf(c codes.Code, format string, a ...any) error {
- return status.Errorf(c, format, a...)
-}
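-
-// For illustration only (not part of the original file): the status-package
-// replacements for the three deprecated helpers above, assuming err came out
-// of an RPC call:
-//
-//	import "google.golang.org/grpc/status"
-//
-//	code := status.Code(err)              // instead of grpc.Code(err)
-//	desc := status.Convert(err).Message() // instead of grpc.ErrorDesc(err)
-//	err = status.Errorf(code, "op failed: %s", desc) // instead of grpc.Errorf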
-
-var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error())
-var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
- switch err {
- case nil, io.EOF:
- return err
- case context.DeadlineExceeded:
- return errContextDeadline
- case context.Canceled:
- return errContextCanceled
- case io.ErrUnexpectedEOF:
- return status.Error(codes.Internal, err.Error())
- }
-
- switch e := err.(type) {
- case transport.ConnectionError:
- return status.Error(codes.Unavailable, e.Desc)
- case *transport.NewStreamError:
- return toRPCErr(e.Err)
- }
-
- if _, ok := status.FromError(err); ok {
- return err
- }
-
- return status.Error(codes.Unknown, err.Error())
-}
-
-// setCallInfoCodec should only be called after CallOptions have been applied.
-func setCallInfoCodec(c *callInfo) error {
- if c.codec != nil {
- // codec was already set by a CallOption; use it, but set the content
- // subtype if it is not set.
- if c.contentSubtype == "" {
- // c.codec is a baseCodec to hide the difference between grpc.Codec and
- // encoding.Codec (Name vs. String method name). We only support
- // setting content subtype from encoding.Codec to avoid a behavior
- // change with the deprecated version.
- if ec, ok := c.codec.(encoding.CodecV2); ok {
- c.contentSubtype = strings.ToLower(ec.Name())
- }
- }
- return nil
- }
-
- if c.contentSubtype == "" {
- // No codec specified in CallOptions; use proto by default.
- c.codec = getCodec(proto.Name)
- return nil
- }
-
- // c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = getCodec(c.contentSubtype)
- if c.codec == nil {
- return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
- }
- return nil
-}
-
-// The SupportPackageIsVersion variables are referenced from generated protocol
-// buffer files to ensure compatibility with the gRPC version used. The latest
-// support package version is 9.
-//
-// Older versions are kept for compatibility.
-//
-// These constants should not be referenced from any other code.
-const (
- SupportPackageIsVersion3 = true
- SupportPackageIsVersion4 = true
- SupportPackageIsVersion5 = true
- SupportPackageIsVersion6 = true
- SupportPackageIsVersion7 = true
- SupportPackageIsVersion8 = true
- SupportPackageIsVersion9 = true
-)
-
-const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
deleted file mode 100644
index 976e70ae0..000000000
--- a/vendor/google.golang.org/grpc/server.go
+++ /dev/null
@@ -1,2218 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "math"
- "net"
- "net/http"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/encoding"
- "google.golang.org/grpc/encoding/proto"
- estats "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/binarylog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/grpcutil"
- istats "google.golang.org/grpc/internal/stats"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
- "google.golang.org/grpc/tap"
-)
-
-const (
- defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
- defaultServerMaxSendMessageSize = math.MaxInt32
-
- // Server transports are tracked in a map which is keyed on listener
- // address. For regular gRPC traffic, connections are accepted in Serve()
- // through a call to Accept(), and we use the actual listener address as key
- // when we add it to the map. But for connections received through
- // ServeHTTP(), we do not have a listener and hence use this dummy value.
- listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
-)
-
-func init() {
- internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
- return srv.opts.creds
- }
- internal.IsRegisteredMethod = func(srv *Server, method string) bool {
- return srv.isRegisteredMethod(method)
- }
- internal.ServerFromContext = serverFromContext
- internal.AddGlobalServerOptions = func(opt ...ServerOption) {
- globalServerOptions = append(globalServerOptions, opt...)
- }
- internal.ClearGlobalServerOptions = func() {
- globalServerOptions = nil
- }
- internal.BinaryLogger = binaryLogger
- internal.JoinServerOptions = newJoinServerOption
- internal.BufferPool = bufferPool
- internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder {
- return istats.NewMetricsRecorderList(srv.opts.statsHandlers)
- }
-}
-
-var statusOK = status.New(codes.OK, "")
-var logger = grpclog.Component("core")
-
-// MethodHandler is a function type that processes a unary RPC method call.
-type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
-
-// MethodDesc represents an RPC service's method specification.
-type MethodDesc struct {
- MethodName string
- Handler MethodHandler
-}
-
-// ServiceDesc represents an RPC service's specification.
-type ServiceDesc struct {
- ServiceName string
- // The pointer to the service interface. Used to check whether the
- // user-provided implementation satisfies the interface requirements.
- HandlerType any
- Methods []MethodDesc
- Streams []StreamDesc
- Metadata any
-}
-
-// serviceInfo wraps information about a service. It is very similar to
-// ServiceDesc and is constructed from it for internal purposes.
-type serviceInfo struct {
- // Contains the implementation for the methods in this service.
- serviceImpl any
- methods map[string]*MethodDesc
- streams map[string]*StreamDesc
- mdata any
-}
-
-// Server is a gRPC server to serve RPC requests.
-type Server struct {
- opts serverOptions
-
- mu sync.Mutex // guards following
- lis map[net.Listener]bool
- // conns contains all active server transports. It is a map keyed on a
- // listener address with the value being the set of active transports
- // belonging to that listener.
- conns map[string]map[transport.ServerTransport]bool
- serve bool
- drain bool
- cv *sync.Cond // signaled when connections close for GracefulStop
- services map[string]*serviceInfo // service name -> service info
- events traceEventLog
-
- quit *grpcsync.Event
- done *grpcsync.Event
- channelzRemoveOnce sync.Once
- serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
- handlersWG sync.WaitGroup // counts active method handler goroutines
-
- channelz *channelz.Server
-
- serverWorkerChannel chan func()
- serverWorkerChannelClose func()
-}
-
-type serverOptions struct {
- creds credentials.TransportCredentials
- codec baseCodec
- cp Compressor
- dc Decompressor
- unaryInt UnaryServerInterceptor
- streamInt StreamServerInterceptor
- chainUnaryInts []UnaryServerInterceptor
- chainStreamInts []StreamServerInterceptor
- binaryLogger binarylog.Logger
- inTapHandle tap.ServerInHandle
- statsHandlers []stats.Handler
- maxConcurrentStreams uint32
- maxReceiveMessageSize int
- maxSendMessageSize int
- unknownStreamDesc *StreamDesc
- keepaliveParams keepalive.ServerParameters
- keepalivePolicy keepalive.EnforcementPolicy
- initialWindowSize int32
- initialConnWindowSize int32
- writeBufferSize int
- readBufferSize int
- sharedWriteBuffer bool
- connectionTimeout time.Duration
- maxHeaderListSize *uint32
- headerTableSize *uint32
- numServerWorkers uint32
- bufferPool mem.BufferPool
- waitForHandlers bool
-}
-
-var defaultServerOptions = serverOptions{
- maxConcurrentStreams: math.MaxUint32,
- maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
- maxSendMessageSize: defaultServerMaxSendMessageSize,
- connectionTimeout: 120 * time.Second,
- writeBufferSize: defaultWriteBufSize,
- readBufferSize: defaultReadBufSize,
- bufferPool: mem.DefaultBufferPool(),
-}
-var globalServerOptions []ServerOption
-
-// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
-type ServerOption interface {
- apply(*serverOptions)
-}
-
-// EmptyServerOption does not alter the server configuration. It can be embedded
-// in another structure to build custom server options.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type EmptyServerOption struct{}
-
-func (EmptyServerOption) apply(*serverOptions) {}
-
-// funcServerOption wraps a function that modifies serverOptions into an
-// implementation of the ServerOption interface.
-type funcServerOption struct {
- f func(*serverOptions)
-}
-
-func (fdo *funcServerOption) apply(do *serverOptions) {
- fdo.f(do)
-}
-
-func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
- return &funcServerOption{
- f: f,
- }
-}
-
-// joinServerOption provides a way to combine arbitrary number of server
-// options into one.
-type joinServerOption struct {
- opts []ServerOption
-}
-
-func (mdo *joinServerOption) apply(do *serverOptions) {
- for _, opt := range mdo.opts {
- opt.apply(do)
- }
-}
-
-func newJoinServerOption(opts ...ServerOption) ServerOption {
- return &joinServerOption{opts: opts}
-}
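-
-// For illustration only (not part of the original file): how ServerOptions
-// compose at the call site. All options shown are exported by this package;
-// loggingInterceptor and authInterceptor are hypothetical values supplied by
-// the caller:
-//
-//	srv := grpc.NewServer(
-//		grpc.MaxRecvMsgSize(16*1024*1024),
-//		grpc.ConnectionTimeout(30*time.Second),
-//		grpc.ChainUnaryInterceptor(loggingInterceptor, authInterceptor),
-//	)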
-
-// SharedWriteBuffer allows reusing the per-connection transport write buffer.
-// If this option is set to true, every connection will release the buffer after
-// flushing the data on the wire.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func SharedWriteBuffer(val bool) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.sharedWriteBuffer = val
- })
-}
-
-// WriteBufferSize determines how much data can be batched before doing a write
-// on the wire. The default value for this buffer is 32KB. Zero or negative
-// values will disable the write buffer such that each write will go directly
-// to the underlying connection. Note: A Send call may not directly translate
-// to a write.
-func WriteBufferSize(s int) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.writeBufferSize = s
- })
-}
-
-// ReadBufferSize lets you set the size of the read buffer; this determines how
-// much data can be read at most in one read syscall. The default value for this
-// buffer is 32KB. Zero or negative values will disable the read buffer for a
-// connection so the data framer can access the underlying conn directly.
-func ReadBufferSize(s int) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.readBufferSize = s
- })
-}
-
-// InitialWindowSize returns a ServerOption that sets the window size for a stream.
-// The lower bound for window size is 64K and any value smaller than that will be ignored.
-func InitialWindowSize(s int32) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.initialWindowSize = s
- })
-}
-
-// InitialConnWindowSize returns a ServerOption that sets the window size for a connection.
-// The lower bound for window size is 64K and any value smaller than that will be ignored.
-func InitialConnWindowSize(s int32) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.initialConnWindowSize = s
- })
-}
-
-// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
-func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
- if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
- logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
- kp.Time = internal.KeepaliveMinServerPingTime
- }
-
- return newFuncServerOption(func(o *serverOptions) {
- o.keepaliveParams = kp
- })
-}
-
-// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
-func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.keepalivePolicy = kep
- })
-}
-
-// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
-//
-// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
-//
-// Deprecated: register codecs using encoding.RegisterCodec. The server will
-// automatically use registered codecs based on the incoming requests' headers.
-// See also
-// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
-// Will be supported throughout 1.x.
-func CustomCodec(codec Codec) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.codec = newCodecV0Bridge(codec)
- })
-}
-
-// ForceServerCodec returns a ServerOption that sets a codec for message
-// marshaling and unmarshaling.
-//
-// This will override any lookups by content-subtype for Codecs registered
-// with RegisterCodec.
-//
-// See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details. Also see the documentation on RegisterCodec and
-// CallContentSubtype for more details on the interaction between encoding.Codec
-// and content-subtype.
-//
-// This function is provided for advanced users; prefer to register codecs
-// using encoding.RegisterCodec.
-// The server will automatically use registered codecs based on the incoming
-// requests' headers. See also
-// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
-// Will be supported throughout 1.x.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ForceServerCodec(codec encoding.Codec) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.codec = newCodecV1Bridge(codec)
- })
-}
-
-// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
-// CodecV2 interface.
-//
-// Will be supported throughout 1.x.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.codec = codecV2
- })
-}
-
-// RPCCompressor returns a ServerOption that sets a compressor for outbound
-// messages. For backward compatibility, all outbound messages will be sent
-// using this compressor, regardless of incoming message compression. By
-// default, server messages will be sent using the same compressor with which
-// request messages were sent.
-//
-// Deprecated: use encoding.RegisterCompressor instead. Will be supported
-// throughout 1.x.
-func RPCCompressor(cp Compressor) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.cp = cp
- })
-}
-
-// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
-// messages. It has higher priority than decompressors registered via
-// encoding.RegisterCompressor.
-//
-// Deprecated: use encoding.RegisterCompressor instead. Will be supported
-// throughout 1.x.
-func RPCDecompressor(dc Decompressor) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.dc = dc
- })
-}
-
-// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
-// If this is not set, gRPC uses the default limit.
-//
-// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
-func MaxMsgSize(m int) ServerOption {
- return MaxRecvMsgSize(m)
-}
-
-// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
-// If this is not set, gRPC uses the default 4MB.
-func MaxRecvMsgSize(m int) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.maxReceiveMessageSize = m
- })
-}
-
-// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
-// If this is not set, gRPC uses the default `math.MaxInt32`.
-func MaxSendMsgSize(m int) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.maxSendMessageSize = m
- })
-}
-
-// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
-// of concurrent streams to each ServerTransport.
-func MaxConcurrentStreams(n uint32) ServerOption {
- if n == 0 {
- n = math.MaxUint32
- }
- return newFuncServerOption(func(o *serverOptions) {
- o.maxConcurrentStreams = n
- })
-}
-
-// Creds returns a ServerOption that sets credentials for server connections.
-func Creds(c credentials.TransportCredentials) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.creds = c
- })
-}
-
-// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
-// server. Only one unary interceptor can be installed. The construction of multiple
-// interceptors (e.g., chaining) can be implemented at the caller.
-func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- if o.unaryInt != nil {
- panic("The unary server interceptor was already set and may not be reset.")
- }
- o.unaryInt = i
- })
-}
-
-// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
-// for unary RPCs. The first interceptor will be the outer most,
-// while the last interceptor will be the inner most wrapper around the real call.
-// All unary interceptors added by this method will be chained.
-func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
- })
-}
-
-// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
-// server. Only one stream interceptor can be installed.
-func StreamInterceptor(i StreamServerInterceptor) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- if o.streamInt != nil {
- panic("The stream server interceptor was already set and may not be reset.")
- }
- o.streamInt = i
- })
-}
-
-// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
-// for streaming RPCs. The first interceptor will be the outermost,
-// while the last interceptor will be the innermost wrapper around the real call.
-// All stream interceptors added by this method will be chained.
-func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.chainStreamInts = append(o.chainStreamInts, interceptors...)
- })
-}
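-
-// For illustration only (not part of the original file): a minimal
-// UnaryServerInterceptor of the kind the chaining options above accept; it
-// logs each call's method and duration:
-//
-//	func loggingInterceptor(ctx context.Context, req any,
-//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
-//		start := time.Now()
-//		resp, err := handler(ctx, req) // next interceptor, or the real handler
-//		log.Printf("%s took %v (err=%v)", info.FullMethod, time.Since(start), err)
-//		return resp, err
-//	}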
-
-// InTapHandle returns a ServerOption that sets the tap handle for all server
-// transports to be created. Only one can be installed.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func InTapHandle(h tap.ServerInHandle) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- if o.inTapHandle != nil {
- panic("The tap handle was already set and may not be reset.")
- }
- o.inTapHandle = h
- })
-}
-
-// StatsHandler returns a ServerOption that sets the stats handler for the server.
-func StatsHandler(h stats.Handler) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- if h == nil {
- logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption")
- // Do not allow a nil stats handler, which would otherwise cause
- // panics.
- return
- }
- o.statsHandlers = append(o.statsHandlers, h)
- })
-}
-
-// binaryLogger returns a ServerOption that sets the binary logger for the
-// server.
-func binaryLogger(bl binarylog.Logger) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.binaryLogger = bl
- })
-}
-
-// UnknownServiceHandler returns a ServerOption that allows for adding a custom
-// unknown service handler. The provided method is a bidi-streaming RPC service
-// handler that will be invoked instead of returning the "unimplemented" gRPC
-// error whenever a request is received for an unregistered service or method.
-// The handling function and stream interceptor (if set) have full access to
-// the ServerStream, including its Context.
-func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.unknownStreamDesc = &StreamDesc{
- StreamName: "unknown_service_handler",
- Handler: streamHandler,
- // We need to assume that the users of the streamHandler will want to use both.
- ClientStreams: true,
- ServerStreams: true,
- }
- })
-}
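-
-// For illustration only (not part of the original file): a catch-all handler
-// of the kind this option expects; it rejects unknown methods with a custom
-// message instead of the stock "unimplemented" error:
-//
-//	grpc.UnknownServiceHandler(func(srv any, stream grpc.ServerStream) error {
-//		method, _ := grpc.MethodFromServerStream(stream)
-//		return status.Errorf(codes.Unimplemented, "no handler for %q", method)
-//	})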
-
-// ConnectionTimeout returns a ServerOption that sets the timeout for
-// connection establishment (up to and including HTTP/2 handshaking) for all
-// new connections. If this is not set, the default is 120 seconds. A zero or
-// negative value will result in an immediate timeout.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ConnectionTimeout(d time.Duration) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.connectionTimeout = d
- })
-}
-
-// MaxHeaderListSizeServerOption is a ServerOption that sets the max
-// (uncompressed) size of header list that the server is prepared to accept.
-type MaxHeaderListSizeServerOption struct {
- MaxHeaderListSize uint32
-}
-
-func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
- so.maxHeaderListSize = &o.MaxHeaderListSize
-}
-
-// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
-// of header list that the server is prepared to accept.
-func MaxHeaderListSize(s uint32) ServerOption {
- return MaxHeaderListSizeServerOption{
- MaxHeaderListSize: s,
- }
-}
-
-// HeaderTableSize returns a ServerOption that sets the size of dynamic
-// header table for stream.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func HeaderTableSize(s uint32) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.headerTableSize = &s
- })
-}
-
-// NumStreamWorkers returns a ServerOption that sets the number of worker
-// goroutines that should be used to process incoming streams. Setting this to
-// zero (default) will disable workers and spawn a new goroutine for each
-// stream.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NumStreamWorkers(numServerWorkers uint32) ServerOption {
- // TODO: If/when this API gets stabilized (i.e. stream workers become the
- // only way streams are processed), change the behavior of the zero value to
- // a sane default. Preliminary experiments suggest that a value equal to the
- // number of CPUs available is most performant; requires thorough testing.
- return newFuncServerOption(func(o *serverOptions) {
- o.numServerWorkers = numServerWorkers
- })
-}
-
-// WaitForHandlers causes Stop to wait until all outstanding method handlers have
-// exited before returning. If false, Stop will return as soon as all
-// connections have closed, but method handlers may still be running. By
-// default, Stop does not wait for method handlers to return.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WaitForHandlers(w bool) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.waitForHandlers = w
- })
-}
-
-func bufferPool(bufferPool mem.BufferPool) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.bufferPool = bufferPool
- })
-}
-
-// serverWorkerResetThreshold defines how often a worker's stack is reset. Every
-// N requests, a worker resets its stack by spawning a new goroutine in its
-// place, so that large stacks don't live in memory forever. 2^16 should allow
-// each goroutine stack to live for at least a few seconds in a typical
-// workload (assuming a QPS of a few thousand requests/sec).
-const serverWorkerResetThreshold = 1 << 16
-
-// serverWorker blocks on a *transport.ServerStream channel forever and waits
-// for data to be fed by serveStreams. This allows multiple requests to be
-// processed by the same goroutine, removing the need for expensive stack
-// re-allocations (see the runtime.morestack problem [1]).
-//
-// [1] https://github.com/golang/go/issues/18138
-func (s *Server) serverWorker() {
- for completed := 0; completed < serverWorkerResetThreshold; completed++ {
- f, ok := <-s.serverWorkerChannel
- if !ok {
- return
- }
- f()
- }
- go s.serverWorker()
-}
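-
-// For illustration only (not part of the original file): the worker-reset
-// pattern above reduced to its essentials. After resetAfter tasks the
-// goroutine replaces itself, discarding any large stack it grew while
-// serving:
-//
-//	func worker(tasks <-chan func(), resetAfter int) {
-//		for done := 0; done < resetAfter; done++ {
-//			f, ok := <-tasks
-//			if !ok {
-//				return
-//			}
-//			f()
-//		}
-//		go worker(tasks, resetAfter) // fresh goroutine, fresh (small) stack
-//	}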
-
-// initServerWorkers creates worker goroutines and a channel to process incoming
-// connections to reduce the time spent overall on runtime.morestack.
-func (s *Server) initServerWorkers() {
- s.serverWorkerChannel = make(chan func())
- s.serverWorkerChannelClose = sync.OnceFunc(func() {
- close(s.serverWorkerChannel)
- })
- for i := uint32(0); i < s.opts.numServerWorkers; i++ {
- go s.serverWorker()
- }
-}
-
-// NewServer creates a gRPC server which has no service registered and has not
-// started to accept requests yet.
-func NewServer(opt ...ServerOption) *Server {
- opts := defaultServerOptions
- for _, o := range globalServerOptions {
- o.apply(&opts)
- }
- for _, o := range opt {
- o.apply(&opts)
- }
- s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[string]map[transport.ServerTransport]bool),
- services: make(map[string]*serviceInfo),
- quit: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- channelz: channelz.RegisterServer(""),
- }
- chainUnaryServerInterceptors(s)
- chainStreamServerInterceptors(s)
- s.cv = sync.NewCond(&s.mu)
- if EnableTracing {
- _, file, line, _ := runtime.Caller(1)
- s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
- }
-
- if s.opts.numServerWorkers > 0 {
- s.initServerWorkers()
- }
-
- channelz.Info(logger, s.channelz, "Server created")
- return s
-}
-
-// printf records an event in s's event log, unless s has been stopped.
-// REQUIRES s.mu is held.
-func (s *Server) printf(format string, a ...any) {
- if s.events != nil {
- s.events.Printf(format, a...)
- }
-}
-
-// errorf records an error in s's event log, unless s has been stopped.
-// REQUIRES s.mu is held.
-func (s *Server) errorf(format string, a ...any) {
- if s.events != nil {
- s.events.Errorf(format, a...)
- }
-}
-
-// ServiceRegistrar wraps a single method that supports service registration. It
-// enables users to pass concrete types other than grpc.Server to the service
-// registration methods exported by the IDL generated code.
-type ServiceRegistrar interface {
- // RegisterService registers a service and its implementation to the
- // concrete type implementing this interface. It may not be called
- // once the server has started serving.
- // desc describes the service and its methods and handlers. impl is the
- // service implementation which is passed to the method handlers.
- RegisterService(desc *ServiceDesc, impl any)
-}
-
-// RegisterService registers a service and its implementation to the gRPC
-// server. It is called from the IDL generated code. This must be called before
-// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
-// ensure it implements sd.HandlerType.
-func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
- if ss != nil {
- ht := reflect.TypeOf(sd.HandlerType).Elem()
- st := reflect.TypeOf(ss)
- if !st.Implements(ht) {
- logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
- }
- }
- s.register(sd, ss)
-}
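-
-// For illustration only (not part of the original file): generated code is
-// the usual caller of RegisterService. For a hypothetical Greeter service
-// emitted by protoc-gen-go-grpc, registration typically looks like:
-//
-//	s := grpc.NewServer()
-//	pb.RegisterGreeterServer(s, &greeterImpl{}) // wraps s.RegisterService(&pb.Greeter_ServiceDesc, impl)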
-
-func (s *Server) register(sd *ServiceDesc, ss any) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.printf("RegisterService(%q)", sd.ServiceName)
- if s.serve {
- logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
- }
- if _, ok := s.services[sd.ServiceName]; ok {
- logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
- }
- info := &serviceInfo{
- serviceImpl: ss,
- methods: make(map[string]*MethodDesc),
- streams: make(map[string]*StreamDesc),
- mdata: sd.Metadata,
- }
- for i := range sd.Methods {
- d := &sd.Methods[i]
- info.methods[d.MethodName] = d
- }
- for i := range sd.Streams {
- d := &sd.Streams[i]
- info.streams[d.StreamName] = d
- }
- s.services[sd.ServiceName] = info
-}
-
-// MethodInfo contains the information of an RPC including its method name and type.
-type MethodInfo struct {
- // Name is the method name only, without the service name or package name.
- Name string
- // IsClientStream indicates whether the RPC is a client streaming RPC.
- IsClientStream bool
- // IsServerStream indicates whether the RPC is a server streaming RPC.
- IsServerStream bool
-}
-
-// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
-type ServiceInfo struct {
- Methods []MethodInfo
- // Metadata is the metadata specified in ServiceDesc when registering service.
- Metadata any
-}
-
-// GetServiceInfo returns a map from service names to ServiceInfo.
-// Service names include the package names, in the form of <package>.<service>.
-func (s *Server) GetServiceInfo() map[string]ServiceInfo {
- ret := make(map[string]ServiceInfo)
- for n, srv := range s.services {
- methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
- for m := range srv.methods {
- methods = append(methods, MethodInfo{
- Name: m,
- IsClientStream: false,
- IsServerStream: false,
- })
- }
- for m, d := range srv.streams {
- methods = append(methods, MethodInfo{
- Name: m,
- IsClientStream: d.ClientStreams,
- IsServerStream: d.ServerStreams,
- })
- }
-
- ret[n] = ServiceInfo{
- Methods: methods,
- Metadata: srv.mdata,
- }
- }
- return ret
-}
-
-// ErrServerStopped indicates that the operation is now illegal because of
-// the server being stopped.
-var ErrServerStopped = errors.New("grpc: the server has been stopped")
-
-type listenSocket struct {
- net.Listener
- channelz *channelz.Socket
-}
-
-func (l *listenSocket) Close() error {
- err := l.Listener.Close()
- channelz.RemoveEntry(l.channelz.ID)
- channelz.Info(logger, l.channelz, "ListenSocket deleted")
- return err
-}
-
-// Serve accepts incoming connections on the listener lis, creating a new
-// ServerTransport and service goroutine for each. The service goroutines
-// read gRPC requests and then call the registered handlers to reply to them.
-// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
-// this method returns.
-// Serve will return a non-nil error unless Stop or GracefulStop is called.
-//
-// Note: All supported releases of Go (as of December 2023) override the OS
-// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
-// with OS defaults for keepalive time and interval, callers need to do the
-// following two things:
-// - pass a net.Listener created by calling the Listen method on a
-// net.ListenConfig with the `KeepAlive` field set to a negative value. This
-// will result in the Go standard library not overriding OS defaults for TCP
-// keepalive interval and time. But this will also result in the Go standard
-// library not enabling TCP keepalives by default.
-// - override the Accept method on the passed in net.Listener and set the
-// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
-func (s *Server) Serve(lis net.Listener) error {
- s.mu.Lock()
- s.printf("serving")
- s.serve = true
- if s.lis == nil {
- // Serve called after Stop or GracefulStop.
- s.mu.Unlock()
- lis.Close()
- return ErrServerStopped
- }
-
- s.serveWG.Add(1)
- defer func() {
- s.serveWG.Done()
- if s.quit.HasFired() {
- // Stop or GracefulStop called; block until done and return nil.
- <-s.done.Done()
- }
- }()
-
- ls := &listenSocket{
- Listener: lis,
- channelz: channelz.RegisterSocket(&channelz.Socket{
- SocketType: channelz.SocketTypeListen,
- Parent: s.channelz,
- RefName: lis.Addr().String(),
- LocalAddr: lis.Addr(),
- SocketOptions: channelz.GetSocketOption(lis)},
- ),
- }
- s.lis[ls] = true
-
- defer func() {
- s.mu.Lock()
- if s.lis != nil && s.lis[ls] {
- ls.Close()
- delete(s.lis, ls)
- }
- s.mu.Unlock()
- }()
-
- s.mu.Unlock()
- channelz.Info(logger, ls.channelz, "ListenSocket created")
-
- var tempDelay time.Duration // how long to sleep on accept failure
- for {
- rawConn, err := lis.Accept()
- if err != nil {
- if ne, ok := err.(interface {
- Temporary() bool
- }); ok && ne.Temporary() {
- if tempDelay == 0 {
- tempDelay = 5 * time.Millisecond
- } else {
- tempDelay *= 2
- }
- if max := 1 * time.Second; tempDelay > max {
- tempDelay = max
- }
- s.mu.Lock()
- s.printf("Accept error: %v; retrying in %v", err, tempDelay)
- s.mu.Unlock()
- timer := time.NewTimer(tempDelay)
- select {
- case <-timer.C:
- case <-s.quit.Done():
- timer.Stop()
- return nil
- }
- continue
- }
- s.mu.Lock()
- s.printf("done serving; Accept = %v", err)
- s.mu.Unlock()
-
- if s.quit.HasFired() {
- return nil
- }
- return err
- }
- tempDelay = 0
- // Start a new goroutine to deal with rawConn so we don't stall this Accept
- // loop goroutine.
- //
- // Make sure we account for the goroutine so GracefulStop doesn't nil out
- // s.conns before this conn can be added.
- s.serveWG.Add(1)
- go func() {
- s.handleRawConn(lis.Addr().String(), rawConn)
- s.serveWG.Done()
- }()
- }
-}
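-
-// For illustration only (not part of the original file): the listener setup
-// described in the Serve comment above, for callers who want OS-default TCP
-// keepalives; grpcServer is an assumed, previously constructed *Server, and
-// overriding Accept to set SO_KEEPALIVE is platform-specific and omitted:
-//
-//	lc := net.ListenConfig{KeepAlive: -1} // don't override OS defaults
-//	lis, err := lc.Listen(context.Background(), "tcp", ":50051")
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	if err := grpcServer.Serve(lis); err != nil {
-//		log.Fatal(err)
-//	}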
-
-// handleRawConn forks a goroutine to handle a just-accepted connection that
-// has not had any I/O performed on it yet.
-func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
- if s.quit.HasFired() {
- rawConn.Close()
- return
- }
- rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
-
- // Finish handshaking (HTTP2)
- st := s.newHTTP2Transport(rawConn)
- rawConn.SetDeadline(time.Time{})
- if st == nil {
- return
- }
-
- if cc, ok := rawConn.(interface {
- PassServerTransport(transport.ServerTransport)
- }); ok {
- cc.PassServerTransport(st)
- }
-
- if !s.addConn(lisAddr, st) {
- return
- }
- go func() {
- s.serveStreams(context.Background(), st, rawConn)
- s.removeConn(lisAddr, st)
- }()
-}
-
-// newHTTP2Transport sets up an HTTP/2 transport (using the
-// gRPC http2 server transport in transport/http2_server.go).
-func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
- config := &transport.ServerConfig{
- MaxStreams: s.opts.maxConcurrentStreams,
- ConnectionTimeout: s.opts.connectionTimeout,
- Credentials: s.opts.creds,
- InTapHandle: s.opts.inTapHandle,
- StatsHandlers: s.opts.statsHandlers,
- KeepaliveParams: s.opts.keepaliveParams,
- KeepalivePolicy: s.opts.keepalivePolicy,
- InitialWindowSize: s.opts.initialWindowSize,
- InitialConnWindowSize: s.opts.initialConnWindowSize,
- WriteBufferSize: s.opts.writeBufferSize,
- ReadBufferSize: s.opts.readBufferSize,
- SharedWriteBuffer: s.opts.sharedWriteBuffer,
- ChannelzParent: s.channelz,
- MaxHeaderListSize: s.opts.maxHeaderListSize,
- HeaderTableSize: s.opts.headerTableSize,
- BufferPool: s.opts.bufferPool,
- }
- st, err := transport.NewServerTransport(c, config)
- if err != nil {
- s.mu.Lock()
- s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
- s.mu.Unlock()
- // ErrConnDispatched means that the connection was dispatched away from
- // gRPC; those connections should be left open.
- if err != credentials.ErrConnDispatched {
- // Don't log on ErrConnDispatched and io.EOF to prevent log spam.
- if err != io.EOF {
- channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
- }
- c.Close()
- }
- return nil
- }
-
- return st
-}
-
-func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
- ctx = transport.SetConnection(ctx, rawConn)
- ctx = peer.NewContext(ctx, st.Peer())
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
- RemoteAddr: st.Peer().Addr,
- LocalAddr: st.Peer().LocalAddr,
- })
- sh.HandleConn(ctx, &stats.ConnBegin{})
- }
-
- defer func() {
- st.Close(errors.New("finished serving streams for the server transport"))
- for _, sh := range s.opts.statsHandlers {
- sh.HandleConn(ctx, &stats.ConnEnd{})
- }
- }()
-
- streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
- st.HandleStreams(ctx, func(stream *transport.ServerStream) {
- s.handlersWG.Add(1)
- streamQuota.acquire()
- f := func() {
- defer streamQuota.release()
- defer s.handlersWG.Done()
- s.handleStream(st, stream)
- }
-
- if s.opts.numServerWorkers > 0 {
- select {
- case s.serverWorkerChannel <- f:
- return
- default:
- // If all stream workers are busy, fall back to the default code path.
- }
- }
- go f()
- })
-}
-
-var _ http.Handler = (*Server)(nil)
-
-// ServeHTTP implements the Go standard library's http.Handler
-// interface by responding to the gRPC request r, by looking up
-// the requested gRPC method in the gRPC server s.
-//
-// The provided HTTP request must have arrived on an HTTP/2
-// connection. When using the Go standard library's server,
-// practically this means that the Request must also have arrived
-// over TLS.
-//
-// To share one port (such as 443 for https) between gRPC and an
-// existing http.Handler, use a root http.Handler such as:
-//
-// if r.ProtoMajor == 2 && strings.HasPrefix(
-// r.Header.Get("Content-Type"), "application/grpc") {
-// grpcServer.ServeHTTP(w, r)
-// } else {
-// yourMux.ServeHTTP(w, r)
-// }
-//
-// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
-// separate from grpc-go's HTTP/2 server. Performance and features may vary
-// between the two paths. ServeHTTP does not support some gRPC features
-// available through grpc-go's HTTP/2 server.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
- if err != nil {
- // Errors returned from transport.NewServerHandlerTransport have
- // already been written to w.
- return
- }
- if !s.addConn(listenerAddressForServeHTTP, st) {
- return
- }
- defer s.removeConn(listenerAddressForServeHTTP, st)
- s.serveStreams(r.Context(), st, nil)
-}
-
-func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conns == nil {
- st.Close(errors.New("Server.addConn called when server has already been stopped"))
- return false
- }
- if s.drain {
- // Transport added after we drained our existing conns: drain it
- // immediately.
- st.Drain("")
- }
-
- if s.conns[addr] == nil {
- // Create a map entry if this is the first connection on this listener.
- s.conns[addr] = make(map[transport.ServerTransport]bool)
- }
- s.conns[addr][st] = true
- return true
-}
-
-func (s *Server) removeConn(addr string, st transport.ServerTransport) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- conns := s.conns[addr]
- if conns != nil {
- delete(conns, st)
- if len(conns) == 0 {
- // If the last connection for this address is being removed, also
- // remove the map entry corresponding to the address. This is used
- // in GracefulStop() when waiting for all connections to be closed.
- delete(s.conns, addr)
- }
- s.cv.Broadcast()
- }
-}
-
-func (s *Server) incrCallsStarted() {
- s.channelz.ServerMetrics.CallsStarted.Add(1)
- s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
-}
-
-func (s *Server) incrCallsSucceeded() {
- s.channelz.ServerMetrics.CallsSucceeded.Add(1)
-}
-
-func (s *Server) incrCallsFailed() {
- s.channelz.ServerMetrics.CallsFailed.Add(1)
-}
-
-func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
- data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
- if err != nil {
- channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
- return err
- }
-
- compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
- if err != nil {
- data.Free()
- channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
- return err
- }
-
- hdr, payload := msgHeader(data, compData, pf)
-
- defer func() {
- compData.Free()
- data.Free()
- // payload does not need to be freed here, it is either data or compData, both of
- // which are already freed.
- }()
-
- dataLen := data.Len()
- payloadLen := payload.Len()
- // TODO(dfawley): should we be checking len(data) instead?
- if payloadLen > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
- }
- err = stream.Write(hdr, payload, opts)
- if err == nil {
- if len(s.opts.statsHandlers) != 0 {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
- }
- }
- }
- return err
-}
-
-// chainUnaryServerInterceptors chains all unary server interceptors into one.
-func chainUnaryServerInterceptors(s *Server) {
- // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
- // be executed before any other chained interceptors.
- interceptors := s.opts.chainUnaryInts
- if s.opts.unaryInt != nil {
- interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
- }
-
- var chainedInt UnaryServerInterceptor
- if len(interceptors) == 0 {
- chainedInt = nil
- } else if len(interceptors) == 1 {
- chainedInt = interceptors[0]
- } else {
- chainedInt = chainUnaryInterceptors(interceptors)
- }
-
- s.opts.unaryInt = chainedInt
-}
-
-func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
- return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
- return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
- }
-}
-
-func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
- if curr == len(interceptors)-1 {
- return finalHandler
- }
- return func(ctx context.Context, req any) (any, error) {
- return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
- }
-}
-
-func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
- shs := s.opts.statsHandlers
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
- if channelz.IsOn() {
- s.incrCallsStarted()
- }
- var statsBegin *stats.Begin
- for _, sh := range shs {
- beginTime := time.Now()
- statsBegin = &stats.Begin{
- BeginTime: beginTime,
- IsClientStream: false,
- IsServerStream: false,
- }
- sh.HandleRPC(ctx, statsBegin)
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- }
- // The deferred error handling for tracing, stats handler and channelz are
- // combined into one function to reduce stack usage -- a defer takes ~56-64
- // bytes on the stack, so overflowing the stack will require a stack
- // re-allocation, which is expensive.
- //
- // To maintain behavior similar to separate deferred statements, statements
- // should be executed in the reverse order. That is, tracing first, stats
- // handler second, and channelz last. Note that panics *within* defers will
- // lead to different behavior, but that's an acceptable compromise; that
- // would be undefined behavior territory anyway.
- defer func() {
- if trInfo != nil {
- if err != nil && err != io.EOF {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- trInfo.tr.SetError()
- }
- trInfo.tr.Finish()
- }
-
- for _, sh := range shs {
- end := &stats.End{
- BeginTime: statsBegin.BeginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
- }
- sh.HandleRPC(ctx, end)
- }
-
- if channelz.IsOn() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }
- }()
- }
- var binlogs []binarylog.MethodLogger
- if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
- binlogs = append(binlogs, ml)
- }
- if s.opts.binaryLogger != nil {
- if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
- binlogs = append(binlogs, ml)
- }
- }
- if len(binlogs) != 0 {
- md, _ := metadata.FromIncomingContext(ctx)
- logEntry := &binarylog.ClientHeader{
- Header: md,
- MethodName: stream.Method(),
- PeerAddr: nil,
- }
- if deadline, ok := ctx.Deadline(); ok {
- logEntry.Timeout = time.Until(deadline)
- if logEntry.Timeout < 0 {
- logEntry.Timeout = 0
- }
- }
- if a := md[":authority"]; len(a) > 0 {
- logEntry.Authority = a[0]
- }
- if peer, ok := peer.FromContext(ctx); ok {
- logEntry.PeerAddr = peer.Addr
- }
- for _, binlog := range binlogs {
- binlog.Log(ctx, logEntry)
- }
- }
-
- // comp and cp are used for compression. decomp and dc are used for
- // decompression. If comp and decomp are both set, they are the same;
- // however they are kept separate to ensure that at most one of the
- // compressor/decompressor variable pairs is set for use later.
- var comp, decomp encoding.Compressor
- var cp Compressor
- var dc Decompressor
- var sendCompressorName string
-
- // If dc is set and matches the stream's compression, use it. Otherwise, try
- // to find a matching registered compressor for decomp.
- if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
- dc = s.opts.dc
- } else if rc != "" && rc != encoding.Identity {
- decomp = encoding.GetCompressor(rc)
- if decomp == nil {
- st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- stream.WriteStatus(st)
- return st.Err()
- }
- }
-
- // If cp is set, use it. Otherwise, attempt to compress the response using
- // the incoming message compression method.
- //
- // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
- if s.opts.cp != nil {
- cp = s.opts.cp
- sendCompressorName = cp.Type()
- } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
- // Legacy compressor not specified; attempt to respond with same encoding.
- comp = encoding.GetCompressor(rc)
- if comp != nil {
- sendCompressorName = comp.Name()
- }
- }
-
- if sendCompressorName != "" {
- if err := stream.SetSendCompress(sendCompressorName); err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
- }
- }
-
- var payInfo *payloadInfo
- if len(shs) != 0 || len(binlogs) != 0 {
- payInfo = &payloadInfo{}
- defer payInfo.free()
- }
-
- d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
- if err != nil {
- if e := stream.WriteStatus(status.Convert(err)); e != nil {
- channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
- }
- return err
- }
- freed := false
- dataFree := func() {
- if !freed {
- d.Free()
- freed = true
- }
- }
- defer dataFree()
- df := func(v any) error {
- defer dataFree()
- if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
- return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
- }
-
- for _, sh := range shs {
- sh.HandleRPC(ctx, &stats.InPayload{
- RecvTime: time.Now(),
- Payload: v,
- Length: d.Len(),
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- })
- }
- if len(binlogs) != 0 {
- cm := &binarylog.ClientMessage{
- Message: d.Materialize(),
- }
- for _, binlog := range binlogs {
- binlog.Log(ctx, cm)
- }
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
- }
- return nil
- }
- ctx = NewContextWithServerTransportStream(ctx, stream)
- reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
- if appErr != nil {
- appStatus, ok := status.FromError(appErr)
- if !ok {
- // Convert non-status application error to a status error with code
- // Unknown, but handle context errors specifically.
- appStatus = status.FromContextError(appErr)
- appErr = appStatus.Err()
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
- trInfo.tr.SetError()
- }
- if e := stream.WriteStatus(appStatus); e != nil {
- channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
- }
- if len(binlogs) != 0 {
- if h, _ := stream.Header(); h.Len() > 0 {
-				// Only log serverHeader if there was a header; otherwise the
-				// response can be trailer-only.
- sh := &binarylog.ServerHeader{
- Header: h,
- }
- for _, binlog := range binlogs {
- binlog.Log(ctx, sh)
- }
- }
- st := &binarylog.ServerTrailer{
- Trailer: stream.Trailer(),
- Err: appErr,
- }
- for _, binlog := range binlogs {
- binlog.Log(ctx, st)
- }
- }
- return appErr
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(stringer("OK"), false)
- }
- opts := &transport.WriteOptions{Last: true}
-
- // Server handler could have set new compressor by calling SetSendCompressor.
- // In case it is set, we need to use it for compressing outbound message.
- if stream.SendCompress() != sendCompressorName {
- comp = encoding.GetCompressor(stream.SendCompress())
- }
- if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
- if err == io.EOF {
- // The entire stream is done (for unary RPC only).
- return err
- }
- if sts, ok := status.FromError(err); ok {
- if e := stream.WriteStatus(sts); e != nil {
- channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
- }
- } else {
- switch st := err.(type) {
- case transport.ConnectionError:
- // Nothing to do here.
- default:
- panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
- }
- }
- if len(binlogs) != 0 {
- h, _ := stream.Header()
- sh := &binarylog.ServerHeader{
- Header: h,
- }
- st := &binarylog.ServerTrailer{
- Trailer: stream.Trailer(),
- Err: appErr,
- }
- for _, binlog := range binlogs {
- binlog.Log(ctx, sh)
- binlog.Log(ctx, st)
- }
- }
- return err
- }
- if len(binlogs) != 0 {
- h, _ := stream.Header()
- sh := &binarylog.ServerHeader{
- Header: h,
- }
- sm := &binarylog.ServerMessage{
- Message: reply,
- }
- for _, binlog := range binlogs {
- binlog.Log(ctx, sh)
- binlog.Log(ctx, sm)
- }
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
- }
- // TODO: Should we be logging if writing status failed here, like above?
- // Should the logging be in WriteStatus? Should we ignore the WriteStatus
- // error or allow the stats handler to see it?
- if len(binlogs) != 0 {
- st := &binarylog.ServerTrailer{
- Trailer: stream.Trailer(),
- Err: appErr,
- }
- for _, binlog := range binlogs {
- binlog.Log(ctx, st)
- }
- }
- return stream.WriteStatus(statusOK)
-}
-
-// chainStreamServerInterceptors chains all stream server interceptors into one.
-func chainStreamServerInterceptors(s *Server) {
- // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
- // be executed before any other chained interceptors.
- interceptors := s.opts.chainStreamInts
- if s.opts.streamInt != nil {
- interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
- }
-
- var chainedInt StreamServerInterceptor
- if len(interceptors) == 0 {
- chainedInt = nil
- } else if len(interceptors) == 1 {
- chainedInt = interceptors[0]
- } else {
- chainedInt = chainStreamInterceptors(interceptors)
- }
-
- s.opts.streamInt = chainedInt
-}
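
The two server options that feed chainStreamServerInterceptors are part of grpc-go's public API. A minimal usage sketch, assuming a no-op interceptor named passthrough (an illustrative name, not part of the library): the interceptor passed to grpc.StreamInterceptor always runs first, followed by the grpc.ChainStreamInterceptor ones in registration order.

```go
package main

import "google.golang.org/grpc"

// passthrough is an illustrative no-op stream interceptor: it simply invokes
// the next handler in the chain.
func passthrough(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, h grpc.StreamHandler) error {
	return h(srv, ss)
}

func main() {
	// StreamInterceptor runs first; the chained ones follow in order.
	_ = grpc.NewServer(
		grpc.StreamInterceptor(passthrough),
		grpc.ChainStreamInterceptor(passthrough, passthrough),
	)
}
```
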
-
-func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
- return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
- return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
- }
-}
-
-func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
- if curr == len(interceptors)-1 {
- return finalHandler
- }
- return func(srv any, stream ServerStream) error {
- return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
- }
-}
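
getChainStreamHandler builds the chain recursively: each interceptor receives a handler that invokes the next interceptor, and the last one receives the real handler. A self-contained sketch of the same pattern, reduced to plain functions (Handler, Interceptor, and chain are illustrative stand-ins, not grpc-go types):

```go
package main

import "fmt"

type Handler func(msg string)
type Interceptor func(msg string, next Handler)

// chain composes interceptors recursively, mirroring getChainStreamHandler:
// each interceptor is handed a Handler that runs the rest of the chain.
func chain(ics []Interceptor, final Handler) Handler {
	if len(ics) == 0 {
		return final
	}
	return func(msg string) {
		ics[0](msg, chain(ics[1:], final))
	}
}

func main() {
	logging := func(msg string, next Handler) {
		fmt.Println("logging: enter")
		next(msg)
		fmt.Println("logging: exit")
	}
	auth := func(msg string, next Handler) {
		fmt.Println("auth: enter")
		next(msg)
	}
	h := chain([]Interceptor{logging, auth}, func(msg string) {
		fmt.Println("handler:", msg)
	})
	h("hello") // logging wraps auth, which wraps the final handler
}
```
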
-
-func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
- if channelz.IsOn() {
- s.incrCallsStarted()
- }
- shs := s.opts.statsHandlers
- var statsBegin *stats.Begin
- if len(shs) != 0 {
- beginTime := time.Now()
- statsBegin = &stats.Begin{
- BeginTime: beginTime,
- IsClientStream: sd.ClientStreams,
- IsServerStream: sd.ServerStreams,
- }
- for _, sh := range shs {
- sh.HandleRPC(ctx, statsBegin)
- }
- }
- ctx = NewContextWithServerTransportStream(ctx, stream)
- ss := &serverStream{
- ctx: ctx,
- s: stream,
- p: &parser{r: stream, bufferPool: s.opts.bufferPool},
- codec: s.getCodec(stream.ContentSubtype()),
- maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
- maxSendMessageSize: s.opts.maxSendMessageSize,
- trInfo: trInfo,
- statsHandler: shs,
- }
-
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
- // See comment in processUnaryRPC on defers.
- defer func() {
- if trInfo != nil {
- ss.mu.Lock()
- if err != nil && err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- ss.trInfo.tr.SetError()
- }
- ss.trInfo.tr.Finish()
- ss.trInfo.tr = nil
- ss.mu.Unlock()
- }
-
- if len(shs) != 0 {
- end := &stats.End{
- BeginTime: statsBegin.BeginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
- }
- for _, sh := range shs {
- sh.HandleRPC(ctx, end)
- }
- }
-
- if channelz.IsOn() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }
- }()
- }
-
- if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
- ss.binlogs = append(ss.binlogs, ml)
- }
- if s.opts.binaryLogger != nil {
- if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
- ss.binlogs = append(ss.binlogs, ml)
- }
- }
- if len(ss.binlogs) != 0 {
- md, _ := metadata.FromIncomingContext(ctx)
- logEntry := &binarylog.ClientHeader{
- Header: md,
- MethodName: stream.Method(),
- PeerAddr: nil,
- }
- if deadline, ok := ctx.Deadline(); ok {
- logEntry.Timeout = time.Until(deadline)
- if logEntry.Timeout < 0 {
- logEntry.Timeout = 0
- }
- }
- if a := md[":authority"]; len(a) > 0 {
- logEntry.Authority = a[0]
- }
- if peer, ok := peer.FromContext(ss.Context()); ok {
- logEntry.PeerAddr = peer.Addr
- }
- for _, binlog := range ss.binlogs {
- binlog.Log(ctx, logEntry)
- }
- }
-
- // If dc is set and matches the stream's compression, use it. Otherwise, try
- // to find a matching registered compressor for decomp.
- if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
- ss.decompressorV0 = s.opts.dc
- } else if rc != "" && rc != encoding.Identity {
- ss.decompressorV1 = encoding.GetCompressor(rc)
- if ss.decompressorV1 == nil {
- st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- ss.s.WriteStatus(st)
- return st.Err()
- }
- }
-
- // If cp is set, use it. Otherwise, attempt to compress the response using
- // the incoming message compression method.
- //
- // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
- if s.opts.cp != nil {
- ss.compressorV0 = s.opts.cp
- ss.sendCompressorName = s.opts.cp.Type()
- } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
- // Legacy compressor not specified; attempt to respond with same encoding.
- ss.compressorV1 = encoding.GetCompressor(rc)
- if ss.compressorV1 != nil {
- ss.sendCompressorName = rc
- }
- }
-
- if ss.sendCompressorName != "" {
- if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
- }
- }
-
- ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1)
-
- if trInfo != nil {
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- }
- var appErr error
- var server any
- if info != nil {
- server = info.serviceImpl
- }
- if s.opts.streamInt == nil {
- appErr = sd.Handler(server, ss)
- } else {
- info := &StreamServerInfo{
- FullMethod: stream.Method(),
- IsClientStream: sd.ClientStreams,
- IsServerStream: sd.ServerStreams,
- }
- appErr = s.opts.streamInt(server, ss, info, sd.Handler)
- }
- if appErr != nil {
- appStatus, ok := status.FromError(appErr)
- if !ok {
- // Convert non-status application error to a status error with code
- // Unknown, but handle context errors specifically.
- appStatus = status.FromContextError(appErr)
- appErr = appStatus.Err()
- }
- if trInfo != nil {
- ss.mu.Lock()
- ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
- ss.trInfo.tr.SetError()
- ss.mu.Unlock()
- }
- if len(ss.binlogs) != 0 {
- st := &binarylog.ServerTrailer{
- Trailer: ss.s.Trailer(),
- Err: appErr,
- }
- for _, binlog := range ss.binlogs {
- binlog.Log(ctx, st)
- }
- }
- ss.s.WriteStatus(appStatus)
- // TODO: Should we log an error from WriteStatus here and below?
- return appErr
- }
- if trInfo != nil {
- ss.mu.Lock()
- ss.trInfo.tr.LazyLog(stringer("OK"), false)
- ss.mu.Unlock()
- }
- if len(ss.binlogs) != 0 {
- st := &binarylog.ServerTrailer{
- Trailer: ss.s.Trailer(),
- Err: appErr,
- }
- for _, binlog := range ss.binlogs {
- binlog.Log(ctx, st)
- }
- }
- return ss.s.WriteStatus(statusOK)
-}
-
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
- ctx := stream.Context()
- ctx = contextWithServer(ctx, s)
- var ti *traceInfo
- if EnableTracing {
- tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
- ctx = newTraceContext(ctx, tr)
- ti = &traceInfo{
- tr: tr,
- firstLine: firstLine{
- client: false,
- remoteAddr: t.Peer().Addr,
- },
- }
- if dl, ok := ctx.Deadline(); ok {
- ti.firstLine.deadline = time.Until(dl)
- }
- }
-
- sm := stream.Method()
- if sm != "" && sm[0] == '/' {
- sm = sm[1:]
- }
- pos := strings.LastIndex(sm, "/")
- if pos == -1 {
- if ti != nil {
- ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
- ti.tr.SetError()
- }
- errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
- if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
- if ti != nil {
- ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- ti.tr.SetError()
- }
- channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
- }
- if ti != nil {
- ti.tr.Finish()
- }
- return
- }
- service := sm[:pos]
- method := sm[pos+1:]
-
- // FromIncomingContext is expensive: skip if there are no statsHandlers
- if len(s.opts.statsHandlers) > 0 {
- md, _ := metadata.FromIncomingContext(ctx)
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
- sh.HandleRPC(ctx, &stats.InHeader{
- FullMethod: stream.Method(),
- RemoteAddr: t.Peer().Addr,
- LocalAddr: t.Peer().LocalAddr,
- Compression: stream.RecvCompress(),
- WireLength: stream.HeaderWireLength(),
- Header: md,
- })
- }
- }
-	// Set the updated context on the stream so calls in stream callouts work.
-	// This will be deleted once all stats handler calls come from the gRPC layer.
- stream.SetContext(ctx)
-
- srv, knownService := s.services[service]
- if knownService {
- if md, ok := srv.methods[method]; ok {
- s.processUnaryRPC(ctx, stream, srv, md, ti)
- return
- }
- if sd, ok := srv.streams[method]; ok {
- s.processStreamingRPC(ctx, stream, srv, sd, ti)
- return
- }
- }
-	// Unknown service, or known service with an unknown method.
- if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
- return
- }
- var errDesc string
- if !knownService {
- errDesc = fmt.Sprintf("unknown service %v", service)
- } else {
- errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
- }
- if ti != nil {
- ti.tr.LazyPrintf("%s", errDesc)
- ti.tr.SetError()
- }
- if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
- if ti != nil {
- ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- ti.tr.SetError()
- }
- channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
- }
- if ti != nil {
- ti.tr.Finish()
- }
-}
-
-// The key to save ServerTransportStream in the context.
-type streamKey struct{}
-
-// NewContextWithServerTransportStream creates a new context from ctx and
-// attaches stream to it.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
- return context.WithValue(ctx, streamKey{}, stream)
-}
-
-// ServerTransportStream is a minimal interface that a transport stream must
-// implement. This can be used to mock an actual transport stream for tests of
-// handler code that use, for example, grpc.SetHeader (which requires some
-// stream to be in context).
-//
-// See also NewContextWithServerTransportStream.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type ServerTransportStream interface {
- Method() string
- SetHeader(md metadata.MD) error
- SendHeader(md metadata.MD) error
- SetTrailer(md metadata.MD) error
-}
-
-// ServerTransportStreamFromContext returns the ServerTransportStream saved in
-// ctx. Returns nil if the given context has no stream associated with it
-// (which implies it is not an RPC invocation context).
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
- s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
- return s
-}
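
The doc comment above suggests mocking the transport stream for tests. A hedged sketch of that pattern: a fake stream records headers so handler helpers such as grpc.SetHeader can be exercised without a real transport (mockSTS is an illustrative name).

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// mockSTS is a hypothetical ServerTransportStream that records headers.
type mockSTS struct {
	header metadata.MD
}

func (m *mockSTS) Method() string                  { return "/test.Service/Method" }
func (m *mockSTS) SetHeader(md metadata.MD) error  { m.header = metadata.Join(m.header, md); return nil }
func (m *mockSTS) SendHeader(md metadata.MD) error { return m.SetHeader(md) }
func (m *mockSTS) SetTrailer(metadata.MD) error    { return nil }

func main() {
	sts := &mockSTS{}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), sts)
	_ = grpc.SetHeader(ctx, metadata.Pairs("x-test", "1"))
	fmt.Println(sts.header) // map[x-test:[1]]
}
```
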
-
-// Stop stops the gRPC server. It immediately closes all open
-// connections and listeners.
-// It cancels all active RPCs on the server side and the corresponding
-// pending RPCs on the client side will get notified by connection
-// errors.
-func (s *Server) Stop() {
- s.stop(false)
-}
-
-// GracefulStop stops the gRPC server gracefully. It stops the server from
-// accepting new connections and RPCs and blocks until all the pending RPCs are
-// finished.
-func (s *Server) GracefulStop() {
- s.stop(true)
-}
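
A common pattern built on the two methods above, sketched under the assumption of a 10-second drain budget: attempt GracefulStop first and fall back to Stop if draining takes too long (shutdown is an illustrative helper, not part of grpc-go).

```go
package main

import (
	"time"

	"google.golang.org/grpc"
)

// shutdown drains gracefully, then force-stops once the budget elapses.
func shutdown(srv *grpc.Server, budget time.Duration) {
	done := make(chan struct{})
	go func() {
		srv.GracefulStop() // blocks until pending RPCs finish
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(budget):
		srv.Stop() // hard-close remaining connections and listeners
	}
}

func main() {
	shutdown(grpc.NewServer(), 10*time.Second)
}
```
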
-
-func (s *Server) stop(graceful bool) {
- s.quit.Fire()
- defer s.done.Fire()
-
- s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
- s.mu.Lock()
- s.closeListenersLocked()
- // Wait for serving threads to be ready to exit. Only then can we be sure no
- // new conns will be created.
- s.mu.Unlock()
- s.serveWG.Wait()
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if graceful {
- s.drainAllServerTransportsLocked()
- } else {
- s.closeServerTransportsLocked()
- }
-
- for len(s.conns) != 0 {
- s.cv.Wait()
- }
- s.conns = nil
-
- if s.opts.numServerWorkers > 0 {
- // Closing the channel (only once, via sync.OnceFunc) after all the
- // connections have been closed above ensures that there are no
- // goroutines executing the callback passed to st.HandleStreams (where
- // the channel is written to).
- s.serverWorkerChannelClose()
- }
-
- if graceful || s.opts.waitForHandlers {
- s.handlersWG.Wait()
- }
-
- if s.events != nil {
- s.events.Finish()
- s.events = nil
- }
-}
-
-// s.mu must be held by the caller.
-func (s *Server) closeServerTransportsLocked() {
- for _, conns := range s.conns {
- for st := range conns {
- st.Close(errors.New("Server.Stop called"))
- }
- }
-}
-
-// s.mu must be held by the caller.
-func (s *Server) drainAllServerTransportsLocked() {
- if !s.drain {
- for _, conns := range s.conns {
- for st := range conns {
- st.Drain("graceful_stop")
- }
- }
- s.drain = true
- }
-}
-
-// s.mu must be held by the caller.
-func (s *Server) closeListenersLocked() {
- for lis := range s.lis {
- lis.Close()
- }
- s.lis = nil
-}
-
-// getCodec returns the codec for contentSubtype, which must be lowercase.
-// It never returns nil.
-func (s *Server) getCodec(contentSubtype string) baseCodec {
- if s.opts.codec != nil {
- return s.opts.codec
- }
- if contentSubtype == "" {
- return getCodec(proto.Name)
- }
- codec := getCodec(contentSubtype)
- if codec == nil {
- logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
- return getCodec(proto.Name)
- }
- return codec
-}
-
-type serverKey struct{}
-
-// serverFromContext gets the Server from the context.
-func serverFromContext(ctx context.Context) *Server {
- s, _ := ctx.Value(serverKey{}).(*Server)
- return s
-}
-
-// contextWithServer sets the Server in the context.
-func contextWithServer(ctx context.Context, server *Server) context.Context {
- return context.WithValue(ctx, serverKey{}, server)
-}
-
-// isRegisteredMethod returns whether the passed in method is registered as a
-// method on the server. /service/method and service/method will match if the
-// service and method are registered on the server.
-func (s *Server) isRegisteredMethod(serviceMethod string) bool {
- if serviceMethod != "" && serviceMethod[0] == '/' {
- serviceMethod = serviceMethod[1:]
- }
- pos := strings.LastIndex(serviceMethod, "/")
- if pos == -1 { // Invalid method name syntax.
- return false
- }
- service := serviceMethod[:pos]
- method := serviceMethod[pos+1:]
- srv, knownService := s.services[service]
- if knownService {
- if _, ok := srv.methods[method]; ok {
- return true
- }
- if _, ok := srv.streams[method]; ok {
- return true
- }
- }
- return false
-}
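
Both handleStream and isRegisteredMethod split a full method name the same way: drop one leading '/', then split on the last '/'. A standalone sketch of that parsing (splitMethod is an illustrative helper):

```go
package main

import (
	"fmt"
	"strings"
)

// splitMethod mirrors the parsing above: strip a single leading slash, then
// split service from method at the last slash.
func splitMethod(full string) (service, method string, ok bool) {
	full = strings.TrimPrefix(full, "/")
	pos := strings.LastIndex(full, "/")
	if pos == -1 {
		return "", "", false // malformed method name
	}
	return full[:pos], full[pos+1:], true
}

func main() {
	fmt.Println(splitMethod("/helloworld.Greeter/SayHello"))
	// Output: helloworld.Greeter SayHello true
}
```
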
-
-// SetHeader sets the header metadata to be sent from the server to the client.
-// The context provided must be the context passed to the server's handler.
-//
-// Streaming RPCs should prefer the SetHeader method of the ServerStream.
-//
-// When called multiple times, all the provided metadata will be merged. All
-// the metadata will be sent out when one of the following happens:
-//
-// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
-// - The first response message is sent. For unary handlers, this occurs when
-// the handler returns; for streaming handlers, this can happen when stream's
-// SendMsg method is called.
-// - An RPC status is sent out (error or success). This occurs when the handler
-// returns.
-//
-// SetHeader will fail if called after any of the events above.
-//
-// The error returned is compatible with the status package. However, the
-// status code will often not match the RPC status as seen by the client
-// application, and therefore, should not be relied upon for this purpose.
-func SetHeader(ctx context.Context, md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
- return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
- }
- return stream.SetHeader(md)
-}
-
-// SendHeader sends header metadata. It may be called at most once, and may not
-// be called after any event that causes headers to be sent (see SetHeader for
-// a complete list). The provided md and headers set by SetHeader() will be
-// sent.
-//
-// The error returned is compatible with the status package. However, the
-// status code will often not match the RPC status as seen by the client
-// application, and therefore, should not be relied upon for this purpose.
-func SendHeader(ctx context.Context, md metadata.MD) error {
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
- return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
- }
- if err := stream.SendHeader(md); err != nil {
- return toRPCErr(err)
- }
- return nil
-}
-
-// SetSendCompressor sets a compressor for outbound messages from the server.
-// It must not be called after any event that causes headers to be sent
-// (see ServerStream.SetHeader for the complete list). The provided compressor
-// is used when the following conditions are met:
-//
-//   - the compressor is registered via encoding.RegisterCompressor
-//   - the compressor name exists in the client-advertised compressor names
-//     sent in the grpc-accept-encoding header. Use ClientSupportedCompressors
-//     to get the client-supported compressor names.
-//
-// The context provided must be the context passed to the server's handler.
-// Note that the compressor name encoding.Identity disables outbound
-// compression.
-// By default, server messages are sent using the same compressor with which
-// the request messages were sent.
-//
-// It is not safe to call SetSendCompressor concurrently with SendHeader and
-// SendMsg.
-//
-// # Experimental
-//
-// Notice: This function is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func SetSendCompressor(ctx context.Context, name string) error {
- stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
- if !ok || stream == nil {
- return fmt.Errorf("failed to fetch the stream from the given context")
- }
-
- if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
- return fmt.Errorf("unable to set send compressor: %w", err)
- }
-
- return stream.SetSendCompress(name)
-}
-
-// ClientSupportedCompressors returns compressor names advertised by the client
-// via grpc-accept-encoding header.
-//
-// The context provided must be the context passed to the server's handler.
-//
-// # Experimental
-//
-// Notice: This function is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
- stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
- if !ok || stream == nil {
- return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
- }
-
- return stream.ClientAdvertisedCompressors(), nil
-}
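
A hedged sketch of how the two experimental helpers above are intended to be combined inside a handler: check the client-advertised names, then opt into gzip before any headers are sent. handle is an illustrative stand-in for a generated handler body; the blank gzip import registers the "gzip" compressor.

```go
package main

import (
	"context"
	"slices"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// handle opts into gzip for the response if the client advertised support.
// It must run before headers or the first response message are sent.
func handle(ctx context.Context) error {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	if slices.Contains(names, "gzip") {
		return grpc.SetSendCompressor(ctx, "gzip")
	}
	return nil
}

func main() {} // handle would be called from within a server handler
```
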
-
-// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
-// When called more than once, all the provided metadata will be merged.
-//
-// The error returned is compatible with the status package. However, the
-// status code will often not match the RPC status as seen by the client
-// application, and therefore, should not be relied upon for this purpose.
-func SetTrailer(ctx context.Context, md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
- return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
- }
- return stream.SetTrailer(md)
-}
-
-// Method returns the method string for the server context. The returned
-// string is in the format of "/service/method".
-func Method(ctx context.Context) (string, bool) {
- s := ServerTransportStreamFromContext(ctx)
- if s == nil {
- return "", false
- }
- return s.Method(), true
-}
-
-// validateSendCompressor returns an error when the given compressor name
-// cannot be handled by the server or the client, based on the advertised
-// compressors.
-func validateSendCompressor(name string, clientCompressors []string) error {
- if name == encoding.Identity {
- return nil
- }
-
- if !grpcutil.IsCompressorNameRegistered(name) {
- return fmt.Errorf("compressor not registered %q", name)
- }
-
- for _, c := range clientCompressors {
- if c == name {
- return nil // found match
- }
- }
- return fmt.Errorf("client does not support compressor %q", name)
-}
-
-// atomicSemaphore implements a blocking, counting semaphore. acquire should be
-// called synchronously; release may be called asynchronously.
-type atomicSemaphore struct {
- n atomic.Int64
- wait chan struct{}
-}
-
-func (q *atomicSemaphore) acquire() {
- if q.n.Add(-1) < 0 {
- // We ran out of quota. Block until a release happens.
- <-q.wait
- }
-}
-
-func (q *atomicSemaphore) release() {
-	// N.B. the "<= 0" check below should allow this to work with multiple
-	// concurrent calls to acquire, but also note that with synchronous calls to
-	// acquire, as in our system, n will never be less than -1. There are
-	// fairness issues (queuing) to consider if this were to be generalized.
- if q.n.Add(1) <= 0 {
- // An acquire was waiting on us. Unblock it.
- q.wait <- struct{}{}
- }
-}
-
-func newHandlerQuota(n uint32) *atomicSemaphore {
- a := &atomicSemaphore{wait: make(chan struct{}, 1)}
- a.n.Store(int64(n))
- return a
-}
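
A self-contained sketch of the semaphore above, with illustrative names: the counter tracks remaining quota, and a single-slot channel parks at most one acquirer, which is safe only because acquire is called synchronously from a single goroutine.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type sem struct {
	n    atomic.Int64
	wait chan struct{}
}

func newSem(n int64) *sem {
	s := &sem{wait: make(chan struct{}, 1)}
	s.n.Store(n)
	return s
}

func (s *sem) acquire() {
	if s.n.Add(-1) < 0 {
		<-s.wait // out of quota; block until a release
	}
}

func (s *sem) release() {
	if s.n.Add(1) <= 0 {
		s.wait <- struct{}{} // an acquirer is (or will be) waiting; unblock it
	}
}

func main() {
	s := newSem(1)
	s.acquire()
	go s.release()
	s.acquire() // unblocked by the release above
	fmt.Println("second acquire succeeded")
}
```
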
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
deleted file mode 100644
index 8d451e07c..000000000
--- a/vendor/google.golang.org/grpc/service_config.go
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "reflect"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/pickfirst"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/balancer/gracefulswitch"
- internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
- "google.golang.org/grpc/serviceconfig"
-)
-
-const maxInt = int(^uint(0) >> 1)
-
-// MethodConfig defines the configuration recommended by the service providers for a
-// particular method.
-//
-// Deprecated: Users should not use this struct. Service config should be received
-// through name resolver, as specified here
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type MethodConfig = internalserviceconfig.MethodConfig
-
-// ServiceConfig is provided by the service provider and contains parameters for how
-// clients that connect to the service should behave.
-//
-// Deprecated: Users should not use this struct. Service config should be received
-// through name resolver, as specified here
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type ServiceConfig struct {
- serviceconfig.Config
-
- // lbConfig is the service config's load balancing configuration. If
- // lbConfig and LB are both present, lbConfig will be used.
- lbConfig serviceconfig.LoadBalancingConfig
-
- // Methods contains a map for the methods in this service. If there is an
- // exact match for a method (i.e. /service/method) in the map, use the
- // corresponding MethodConfig. If there's no exact match, look for the
- // default config for the service (/service/) and use the corresponding
- // MethodConfig if it exists. Otherwise, the method has no MethodConfig to
- // use.
- Methods map[string]MethodConfig
-
- // If a retryThrottlingPolicy is provided, gRPC will automatically throttle
- // retry attempts and hedged RPCs when the client’s ratio of failures to
- // successes exceeds a threshold.
- //
- // For each server name, the gRPC client will maintain a token_count which is
- // initially set to maxTokens, and can take values between 0 and maxTokens.
- //
- // Every outgoing RPC (regardless of service or method invoked) will change
- // token_count as follows:
- //
- // - Every failed RPC will decrement the token_count by 1.
- // - Every successful RPC will increment the token_count by tokenRatio.
- //
- // If token_count is less than or equal to maxTokens / 2, then RPCs will not
- // be retried and hedged RPCs will not be sent.
- retryThrottling *retryThrottlingPolicy
-	// healthCheckConfig must be set as one of the requirements to enable LB
-	// channel health checking.
- healthCheckConfig *healthCheckConfig
-	// rawJSONString stores the service config JSON string that gets parsed
-	// into this service config struct.
- rawJSONString string
-}
-
-// healthCheckConfig defines the go-native version of the LB channel health check config.
-type healthCheckConfig struct {
- // serviceName is the service name to use in the health-checking request.
- ServiceName string
-}
-
-type jsonRetryPolicy struct {
- MaxAttempts int
- InitialBackoff internalserviceconfig.Duration
- MaxBackoff internalserviceconfig.Duration
- BackoffMultiplier float64
- RetryableStatusCodes []codes.Code
-}
-
-// retryThrottlingPolicy defines the go-native version of the retry throttling
-// policy defined by the service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryThrottlingPolicy struct {
- // The number of tokens starts at maxTokens. The token_count will always be
- // between 0 and maxTokens.
- //
- // This field is required and must be greater than zero.
- MaxTokens float64
- // The amount of tokens to add on each successful RPC. Typically this will
- // be some number between 0 and 1, e.g., 0.1.
- //
- // This field is required and must be greater than zero. Up to 3 decimal
- // places are supported.
- TokenRatio float64
-}
-
-type jsonName struct {
- Service string
- Method string
-}
-
-var (
- errDuplicatedName = errors.New("duplicated name")
- errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
-)
-
-func (j jsonName) generatePath() (string, error) {
- if j.Service == "" {
- if j.Method != "" {
- return "", errEmptyServiceNonEmptyMethod
- }
- return "", nil
- }
- res := "/" + j.Service + "/"
- if j.Method != "" {
- res += j.Method
- }
- return res, nil
-}
-
-// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
-type jsonMC struct {
- Name *[]jsonName
- WaitForReady *bool
- Timeout *internalserviceconfig.Duration
- MaxRequestMessageBytes *int64
- MaxResponseMessageBytes *int64
- RetryPolicy *jsonRetryPolicy
-}
-
-// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
-type jsonSC struct {
- LoadBalancingPolicy *string
- LoadBalancingConfig *json.RawMessage
- MethodConfig *[]jsonMC
- RetryThrottling *retryThrottlingPolicy
- HealthCheckConfig *healthCheckConfig
-}
-
-func init() {
- internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
- return parseServiceConfig(js, defaultMaxCallAttempts)
- }
-}
-
-func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
- if len(js) == 0 {
- return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
- }
- var rsc jsonSC
- err := json.Unmarshal([]byte(js), &rsc)
- if err != nil {
- logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
- return &serviceconfig.ParseResult{Err: err}
- }
- sc := ServiceConfig{
- Methods: make(map[string]MethodConfig),
- retryThrottling: rsc.RetryThrottling,
- healthCheckConfig: rsc.HealthCheckConfig,
- rawJSONString: js,
- }
- c := rsc.LoadBalancingConfig
- if c == nil {
- name := pickfirst.Name
- if rsc.LoadBalancingPolicy != nil {
- name = *rsc.LoadBalancingPolicy
- }
- if balancer.Get(name) == nil {
- name = pickfirst.Name
- }
- cfg := []map[string]any{{name: struct{}{}}}
- strCfg, err := json.Marshal(cfg)
- if err != nil {
- return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)}
- }
- r := json.RawMessage(strCfg)
- c = &r
- }
- cfg, err := gracefulswitch.ParseConfig(*c)
- if err != nil {
- return &serviceconfig.ParseResult{Err: err}
- }
- sc.lbConfig = cfg
-
- if rsc.MethodConfig == nil {
- return &serviceconfig.ParseResult{Config: &sc}
- }
-
- paths := map[string]struct{}{}
- for _, m := range *rsc.MethodConfig {
- if m.Name == nil {
- continue
- }
-
- mc := MethodConfig{
- WaitForReady: m.WaitForReady,
- Timeout: (*time.Duration)(m.Timeout),
- }
- if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
- logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
- return &serviceconfig.ParseResult{Err: err}
- }
- if m.MaxRequestMessageBytes != nil {
- if *m.MaxRequestMessageBytes > int64(maxInt) {
- mc.MaxReqSize = newInt(maxInt)
- } else {
- mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
- }
- }
- if m.MaxResponseMessageBytes != nil {
- if *m.MaxResponseMessageBytes > int64(maxInt) {
- mc.MaxRespSize = newInt(maxInt)
- } else {
- mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
- }
- }
- for i, n := range *m.Name {
- path, err := n.generatePath()
- if err != nil {
- logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
- return &serviceconfig.ParseResult{Err: err}
- }
-
- if _, ok := paths[path]; ok {
- err = errDuplicatedName
- logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
- return &serviceconfig.ParseResult{Err: err}
- }
- paths[path] = struct{}{}
- sc.Methods[path] = mc
- }
- }
-
- if sc.retryThrottling != nil {
- if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
- return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
- }
- if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
-			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) must be greater than zero", tr)}
- }
- }
- return &serviceconfig.ParseResult{Config: &sc}
-}
-
-func isValidRetryPolicy(jrp *jsonRetryPolicy) bool {
- return jrp.MaxAttempts > 1 &&
- jrp.InitialBackoff > 0 &&
- jrp.MaxBackoff > 0 &&
- jrp.BackoffMultiplier > 0 &&
- len(jrp.RetryableStatusCodes) > 0
-}
-
-func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
- if jrp == nil {
- return nil, nil
- }
-
- if !isValidRetryPolicy(jrp) {
-		return nil, fmt.Errorf("invalid retry policy (%+v)", jrp)
- }
-
- if jrp.MaxAttempts < maxAttempts {
- maxAttempts = jrp.MaxAttempts
- }
- rp := &internalserviceconfig.RetryPolicy{
- MaxAttempts: maxAttempts,
- InitialBackoff: time.Duration(jrp.InitialBackoff),
- MaxBackoff: time.Duration(jrp.MaxBackoff),
- BackoffMultiplier: jrp.BackoffMultiplier,
- RetryableStatusCodes: make(map[codes.Code]bool),
- }
- for _, code := range jrp.RetryableStatusCodes {
- rp.RetryableStatusCodes[code] = true
- }
- return rp, nil
-}
-
-func minPointers(a, b *int) *int {
- if *a < *b {
- return a
- }
- return b
-}
-
-func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
- if mcMax == nil && doptMax == nil {
- return &defaultVal
- }
- if mcMax != nil && doptMax != nil {
- return minPointers(mcMax, doptMax)
- }
- if mcMax != nil {
- return mcMax
- }
- return doptMax
-}
-
-func newInt(b int) *int {
- return &b
-}
-
-func init() {
- internal.EqualServiceConfigForTesting = equalServiceConfig
-}
-
-// equalServiceConfig compares two configs. The rawJSONString field is ignored,
-// because they may differ in whitespace.
-//
-// If any of them is NOT *ServiceConfig, return false.
-func equalServiceConfig(a, b serviceconfig.Config) bool {
- if a == nil && b == nil {
- return true
- }
- aa, ok := a.(*ServiceConfig)
- if !ok {
- return false
- }
- bb, ok := b.(*ServiceConfig)
- if !ok {
- return false
- }
- aaRaw := aa.rawJSONString
- aa.rawJSONString = ""
- bbRaw := bb.rawJSONString
- bb.rawJSONString = ""
- defer func() {
- aa.rawJSONString = aaRaw
- bb.rawJSONString = bbRaw
- }()
- // Using reflect.DeepEqual instead of cmp.Equal because many balancer
- // configs are unexported, and cmp.Equal cannot compare unexported fields
- // from unexported structs.
- return reflect.DeepEqual(aa, bb)
-}
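
For reference, a minimal sketch of JSON that parseServiceConfig above accepts, fed through the public grpc.WithDefaultServiceConfig option. The target address and service name are illustrative; grpc.NewClient does not dial eagerly, but it does reject an invalid default service config.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// sc exercises jsonSC/jsonMC/jsonRetryPolicy: a per-method timeout plus a
// retry policy, with Duration fields given as "Ns" strings.
const sc = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "waitForReady": true,
    "timeout": "1s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	cc, err := grpc.NewClient("dns:///example.invalid:50051",
		grpc.WithDefaultServiceConfig(sc),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		fmt.Println("rejected:", err) // parse errors surface here
		return
	}
	defer cc.Close()
	fmt.Println("service config accepted")
}
```
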
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
deleted file mode 100644
index 35e7a20a0..000000000
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package serviceconfig defines types and methods for operating on gRPC
-// service configs.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package serviceconfig
-
-// Config represents an opaque data structure holding a service config.
-type Config interface {
- isServiceConfig()
-}
-
-// LoadBalancingConfig represents an opaque data structure holding a load
-// balancing config.
-type LoadBalancingConfig interface {
- isLoadBalancingConfig()
-}
-
-// ParseResult contains a service config or an error. Exactly one must be
-// non-nil.
-type ParseResult struct {
- Config Config
- Err error
-}
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
deleted file mode 100644
index dc03731e4..000000000
--- a/vendor/google.golang.org/grpc/stats/handlers.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package stats
-
-import (
- "context"
- "net"
-)
-
-// ConnTagInfo defines the relevant information needed by connection context tagger.
-type ConnTagInfo struct {
- // RemoteAddr is the remote address of the corresponding connection.
- RemoteAddr net.Addr
- // LocalAddr is the local address of the corresponding connection.
- LocalAddr net.Addr
-}
-
-// RPCTagInfo defines the relevant information needed by RPC context tagger.
-type RPCTagInfo struct {
- // FullMethodName is the RPC method in the format of /package.service/method.
- FullMethodName string
- // FailFast indicates if this RPC is failfast.
-	// This field is only valid on the client side; it's always false on the
-	// server side.
- FailFast bool
-}
-
-// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
-type Handler interface {
- // TagRPC can attach some information to the given context.
-	// The context used for the remaining lifetime of the RPC will be derived
-	// from the returned context.
- TagRPC(context.Context, *RPCTagInfo) context.Context
- // HandleRPC processes the RPC stats.
- HandleRPC(context.Context, RPCStats)
-
- // TagConn can attach some information to the given context.
- // The returned context will be used for stats handling.
- // For conn stats handling, the context used in HandleConn for this
- // connection will be derived from the context returned.
- // For RPC stats handling,
- // - On server side, the context used in HandleRPC for all RPCs on this
- // connection will be derived from the context returned.
- // - On client side, the context is not derived from the context returned.
- TagConn(context.Context, *ConnTagInfo) context.Context
- // HandleConn processes the Conn stats.
- HandleConn(context.Context, ConnStats)
-}
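
A minimal implementation of the Handler interface above, sketched as a logging no-op (loggingHandler is an illustrative name); it would be installed with the grpc.StatsHandler server option.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type loggingHandler struct{}

func (loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx // no per-RPC tagging
}
func (loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("rpc stat: %T (client=%v)", s, s.IsClient())
}
func (loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx // no per-connection tagging
}
func (loggingHandler) HandleConn(_ context.Context, s stats.ConnStats) {
	log.Printf("conn stat: %T", s)
}

var _ stats.Handler = loggingHandler{} // compile-time interface check

func main() {
	_ = grpc.NewServer(grpc.StatsHandler(loggingHandler{}))
}
```
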
diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go
deleted file mode 100644
index 641c8e979..000000000
--- a/vendor/google.golang.org/grpc/stats/metrics.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package stats
-
-import "maps"
-
-// MetricSet is a set of metrics to record. Once created, a MetricSet is
-// immutable; however, Add and Remove can make copies with specific metrics
-// added or removed, respectively.
-//
-// Do not construct directly; use NewMetricSet instead.
-type MetricSet struct {
- // metrics are the set of metrics to initialize.
- metrics map[string]bool
-}
-
-// NewMetricSet returns a MetricSet containing metricNames.
-func NewMetricSet(metricNames ...string) *MetricSet {
- newMetrics := make(map[string]bool)
- for _, metric := range metricNames {
- newMetrics[metric] = true
- }
- return &MetricSet{metrics: newMetrics}
-}
-
-// Metrics returns the metrics set. The returned map is read-only and must not
-// be modified.
-func (m *MetricSet) Metrics() map[string]bool {
- return m.metrics
-}
-
-// Add adds the metricNames to the metrics set and returns a new copy with the
-// additional metrics.
-func (m *MetricSet) Add(metricNames ...string) *MetricSet {
- newMetrics := make(map[string]bool)
- for metric := range m.metrics {
- newMetrics[metric] = true
- }
-
- for _, metric := range metricNames {
- newMetrics[metric] = true
- }
- return &MetricSet{metrics: newMetrics}
-}
-
-// Join joins the metrics passed in with the metrics set, and returns a new copy
-// with the merged metrics.
-func (m *MetricSet) Join(metrics *MetricSet) *MetricSet {
- newMetrics := make(map[string]bool)
- maps.Copy(newMetrics, m.metrics)
- maps.Copy(newMetrics, metrics.metrics)
- return &MetricSet{metrics: newMetrics}
-}
-
-// Remove removes the metricNames from the metrics set and returns a new copy
-// with the metrics removed.
-func (m *MetricSet) Remove(metricNames ...string) *MetricSet {
- newMetrics := make(map[string]bool)
- for metric := range m.metrics {
- newMetrics[metric] = true
- }
-
- for _, metric := range metricNames {
- delete(newMetrics, metric)
- }
- return &MetricSet{metrics: newMetrics}
-}
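
A short sketch of the copy-on-write behavior documented above; the metric names are illustrative strings.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	base := stats.NewMetricSet("grpc.client.attempt.duration")
	more := base.Add("grpc.client.call.duration") // new set; base unchanged
	less := more.Remove("grpc.client.attempt.duration")

	fmt.Println(len(base.Metrics()), len(more.Metrics()), len(less.Metrics()))
	// Output: 1 2 1
}
```
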
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
deleted file mode 100644
index 6f20d2d54..000000000
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package stats is for collecting and reporting various network and RPC stats.
-// This package is for monitoring purposes only. All fields are read-only.
-// All APIs are experimental.
-package stats // import "google.golang.org/grpc/stats"
-
-import (
- "context"
- "net"
- "time"
-
- "google.golang.org/grpc/metadata"
-)
-
-// RPCStats contains stats information about RPCs.
-type RPCStats interface {
- isRPCStats()
- // IsClient returns true if this RPCStats is from client side.
- IsClient() bool
-}
-
-// Begin contains stats when an RPC attempt begins.
-// FailFast is only valid if this Begin is from client side.
-type Begin struct {
- // Client is true if this Begin is from client side.
- Client bool
- // BeginTime is the time when the RPC attempt begins.
- BeginTime time.Time
- // FailFast indicates if this RPC is failfast.
- FailFast bool
- // IsClientStream indicates whether the RPC is a client streaming RPC.
- IsClientStream bool
- // IsServerStream indicates whether the RPC is a server streaming RPC.
- IsServerStream bool
- // IsTransparentRetryAttempt indicates whether this attempt was initiated
- // due to transparently retrying a previous attempt.
- IsTransparentRetryAttempt bool
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *Begin) IsClient() bool { return s.Client }
-
-func (s *Begin) isRPCStats() {}
-
-// PickerUpdated indicates that the LB policy provided a new picker while the
-// RPC was waiting for one.
-type PickerUpdated struct{}
-
-// IsClient indicates if the stats information is from the client side. Only
-// the client side interfaces with a picker, so this always returns true.
-func (*PickerUpdated) IsClient() bool { return true }
-
-func (*PickerUpdated) isRPCStats() {}
-
-// InPayload contains the information for an incoming payload.
-type InPayload struct {
- // Client is true if this InPayload is from client side.
- Client bool
-	// Payload is the payload with its original type. It may be modified once
-	// the call to HandleRPC that delivers this InPayload returns, so it must
-	// be copied if needed later.
- Payload any
-
- // Length is the size of the uncompressed payload data. Does not include any
- // framing (gRPC or HTTP/2).
- Length int
- // CompressedLength is the size of the compressed payload data. Does not
- // include any framing (gRPC or HTTP/2). Same as Length if compression not
- // enabled.
- CompressedLength int
- // WireLength is the size of the compressed payload data plus gRPC framing.
- // Does not include HTTP/2 framing.
- WireLength int
-
- // RecvTime is the time when the payload is received.
- RecvTime time.Time
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *InPayload) IsClient() bool { return s.Client }
-
-func (s *InPayload) isRPCStats() {}
-
-// InHeader contains stats when a header is received.
-type InHeader struct {
- // Client is true if this InHeader is from client side.
- Client bool
- // WireLength is the wire length of header.
- WireLength int
- // Compression is the compression algorithm used for the RPC.
- Compression string
- // Header contains the header metadata received.
- Header metadata.MD
-
- // The following fields are valid only if Client is false.
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
- // RemoteAddr is the remote address of the corresponding connection.
- RemoteAddr net.Addr
- // LocalAddr is the local address of the corresponding connection.
- LocalAddr net.Addr
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *InHeader) IsClient() bool { return s.Client }
-
-func (s *InHeader) isRPCStats() {}
-
-// InTrailer contains stats when a trailer is received.
-type InTrailer struct {
- // Client is true if this InTrailer is from client side.
- Client bool
- // WireLength is the wire length of trailer.
- WireLength int
- // Trailer contains the trailer metadata received from the server. This
- // field is only valid if this InTrailer is from the client side.
- Trailer metadata.MD
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *InTrailer) IsClient() bool { return s.Client }
-
-func (s *InTrailer) isRPCStats() {}
-
-// OutPayload contains the information for an outgoing payload.
-type OutPayload struct {
- // Client is true if this OutPayload is from client side.
- Client bool
-	// Payload is the payload with its original type. It may be modified once
-	// the call to HandleRPC that delivers this OutPayload returns, so it must
-	// be copied if needed later.
- Payload any
- // Length is the size of the uncompressed payload data. Does not include any
- // framing (gRPC or HTTP/2).
- Length int
- // CompressedLength is the size of the compressed payload data. Does not
- // include any framing (gRPC or HTTP/2). Same as Length if compression not
- // enabled.
- CompressedLength int
- // WireLength is the size of the compressed payload data plus gRPC framing.
- // Does not include HTTP/2 framing.
- WireLength int
- // SentTime is the time when the payload is sent.
- SentTime time.Time
-}
-
-// IsClient indicates if this stats information is from client side.
-func (s *OutPayload) IsClient() bool { return s.Client }
-
-func (s *OutPayload) isRPCStats() {}
-
-// OutHeader contains stats when a header is sent.
-type OutHeader struct {
- // Client is true if this OutHeader is from client side.
- Client bool
- // Compression is the compression algorithm used for the RPC.
- Compression string
- // Header contains the header metadata sent.
- Header metadata.MD
-
- // The following fields are valid only if Client is true.
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
- // RemoteAddr is the remote address of the corresponding connection.
- RemoteAddr net.Addr
- // LocalAddr is the local address of the corresponding connection.
- LocalAddr net.Addr
-}
-
-// IsClient indicates if this stats information is from client side.
-func (s *OutHeader) IsClient() bool { return s.Client }
-
-func (s *OutHeader) isRPCStats() {}
-
-// OutTrailer contains stats when a trailer is sent.
-type OutTrailer struct {
- // Client is true if this OutTrailer is from client side.
- Client bool
- // WireLength is the wire length of trailer.
- //
-	// Deprecated: This field is never set. The length is not known when this
-	// message is emitted because the trailer fields are compressed with HPACK
-	// only afterwards.
- WireLength int
- // Trailer contains the trailer metadata sent to the client. This
- // field is only valid if this OutTrailer is from the server side.
- Trailer metadata.MD
-}
-
-// IsClient indicates if this stats information is from client side.
-func (s *OutTrailer) IsClient() bool { return s.Client }
-
-func (s *OutTrailer) isRPCStats() {}
-
-// End contains stats when an RPC ends.
-type End struct {
- // Client is true if this End is from client side.
- Client bool
- // BeginTime is the time when the RPC began.
- BeginTime time.Time
- // EndTime is the time when the RPC ends.
- EndTime time.Time
- // Trailer contains the trailer metadata received from the server. This
- // field is only valid if this End is from the client side.
- // Deprecated: use Trailer in InTrailer instead.
- Trailer metadata.MD
- // Error is the error the RPC ended with. It is an error generated from
- // status.Status and can be converted back to status.Status using
- // status.FromError if non-nil.
- Error error
-}
-
-// IsClient indicates if this is from client side.
-func (s *End) IsClient() bool { return s.Client }
-
-func (s *End) isRPCStats() {}
-
-// ConnStats contains stats information about connections.
-type ConnStats interface {
- isConnStats()
- // IsClient returns true if this ConnStats is from client side.
- IsClient() bool
-}
-
-// ConnBegin contains the stats of a connection when it is established.
-type ConnBegin struct {
- // Client is true if this ConnBegin is from client side.
- Client bool
-}
-
-// IsClient indicates if this is from client side.
-func (s *ConnBegin) IsClient() bool { return s.Client }
-
-func (s *ConnBegin) isConnStats() {}
-
-// ConnEnd contains the stats of a connection when it ends.
-type ConnEnd struct {
- // Client is true if this ConnEnd is from client side.
- Client bool
-}
-
-// IsClient indicates if this is from client side.
-func (s *ConnEnd) IsClient() bool { return s.Client }
-
-func (s *ConnEnd) isConnStats() {}
-
-// SetTags attaches stats tagging data to the context, which will be sent in
-// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
-// SetTags will overwrite the values from earlier calls.
-//
-// Deprecated: set the `grpc-tags-bin` header in the metadata instead.
-func SetTags(ctx context.Context, b []byte) context.Context {
- return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
-}
-
-// Tags returns the tags from the context for the inbound RPC.
-//
-// Deprecated: obtain the `grpc-tags-bin` header from metadata instead.
-func Tags(ctx context.Context) []byte {
- traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
- if len(traceValues) == 0 {
- return nil
- }
- return []byte(traceValues[len(traceValues)-1])
-}
-
-// SetTrace attaches trace data to the context, which will be sent in
-// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
-// SetTrace will overwrite the values from earlier calls.
-//
-// Deprecated: set the `grpc-trace-bin` header in the metadata instead.
-func SetTrace(ctx context.Context, b []byte) context.Context {
- return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b))
-}
-
-// Trace returns the trace from the context for the inbound RPC.
-//
-// Deprecated: obtain the `grpc-trace-bin` header from metadata instead.
-func Trace(ctx context.Context) []byte {
- traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
- if len(traceValues) == 0 {
- return nil
- }
- return []byte(traceValues[len(traceValues)-1])
-}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
deleted file mode 100644
index a93360efb..000000000
--- a/vendor/google.golang.org/grpc/status/status.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package status implements errors returned by gRPC. These errors are
-// serialized and transmitted on the wire between server and client, and allow
-// for additional data to be transmitted via the Details field in the status
-// proto. gRPC service handlers should return an error created by this
-// package, and gRPC clients should expect a corresponding error to be
-// returned from the RPC call.
-//
-// This package upholds the invariants that a non-nil error may not
-// contain an OK code, and an OK code must result in a nil error.
-package status
-
-import (
- "context"
- "errors"
- "fmt"
-
- spb "google.golang.org/genproto/googleapis/rpc/status"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal/status"
-)
-
-// Status references google.golang.org/grpc/internal/status. It represents an
-// RPC status code, message, and details. It is immutable and should be
-// created with New, Newf, or FromProto.
-// https://godoc.org/google.golang.org/grpc/internal/status
-type Status = status.Status
-
-// New returns a Status representing c and msg.
-func New(c codes.Code, msg string) *Status {
- return status.New(c, msg)
-}
-
-// Newf returns New(c, fmt.Sprintf(format, a...)).
-func Newf(c codes.Code, format string, a ...any) *Status {
- return New(c, fmt.Sprintf(format, a...))
-}
-
-// Error returns an error representing c and msg. If c is OK, returns nil.
-func Error(c codes.Code, msg string) error {
- return New(c, msg).Err()
-}
-
-// Errorf returns Error(c, fmt.Sprintf(format, a...)).
-func Errorf(c codes.Code, format string, a ...any) error {
- return Error(c, fmt.Sprintf(format, a...))
-}
-
-// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
-func ErrorProto(s *spb.Status) error {
- return FromProto(s).Err()
-}
-
-// FromProto returns a Status representing s.
-func FromProto(s *spb.Status) *Status {
- return status.FromProto(s)
-}
-
-// FromError returns a Status representation of err.
-//
-// - If err was produced by this package or implements the method `GRPCStatus()
-// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
-// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
-// errors, the message returned contains the entire err.Error() text and not
-// just the wrapped status. In that case, ok is true.
-//
-// - If err is nil, a Status is returned with codes.OK and no message, and ok
-// is true.
-//
-// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
-// returns nil (which maps to codes.OK), or if err wraps a type
-// satisfying this, a Status is returned with codes.Unknown and err's
-// Error() message, and ok is false.
-//
-// - Otherwise, err is an error not compatible with this package. In this
-// case, a Status is returned with codes.Unknown and err's Error() message,
-// and ok is false.
-func FromError(err error) (s *Status, ok bool) {
- if err == nil {
- return nil, true
- }
- type grpcstatus interface{ GRPCStatus() *Status }
- if gs, ok := err.(grpcstatus); ok {
- grpcStatus := gs.GRPCStatus()
- if grpcStatus == nil {
- // Error has status nil, which maps to codes.OK. There
- // is no sensible behavior for this, so we turn it into
- // an error with codes.Unknown and discard the existing
- // status.
- return New(codes.Unknown, err.Error()), false
- }
- return grpcStatus, true
- }
- var gs grpcstatus
- if errors.As(err, &gs) {
- grpcStatus := gs.GRPCStatus()
- if grpcStatus == nil {
- // Error wraps an error that has status nil, which maps
- // to codes.OK. There is no sensible behavior for this,
- // so we turn it into an error with codes.Unknown and
- // discard the existing status.
- return New(codes.Unknown, err.Error()), false
- }
- p := grpcStatus.Proto()
- p.Message = err.Error()
- return status.FromProto(p), true
- }
- return New(codes.Unknown, err.Error()), false
-}
-
-// Convert is a convenience function which removes the need to handle the
-// boolean return value from FromError.
-func Convert(err error) *Status {
- s, _ := FromError(err)
- return s
-}
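A small usage sketch of FromError and Convert as documented above, using only this package's own API; the output comments assume the default code-to-string formatting:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "user not found")

	// FromError recovers the status and reports whether err carried one.
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // NotFound user not found
	}

	// Convert drops the boolean; errors without a status come back as
	// codes.Unknown with the error text as the message.
	st := status.Convert(errors.New("plain error"))
	fmt.Println(st.Code(), st.Message()) // Unknown plain error
}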
-
-// Code returns the Code of the error if it is a Status error or if it wraps a
-// Status error. If that is not the case, it returns codes.OK if err is nil, or
-// codes.Unknown otherwise.
-func Code(err error) codes.Code {
- // Don't use FromError to avoid allocation of OK status.
- if err == nil {
- return codes.OK
- }
-
- return Convert(err).Code()
-}
-
-// FromContextError converts a context error or wrapped context error into a
-// Status. It returns a Status with codes.OK if err is nil, or a Status with
-// codes.Unknown if err is non-nil and not a context error.
-func FromContextError(err error) *Status {
- if err == nil {
- return nil
- }
- if errors.Is(err, context.DeadlineExceeded) {
- return New(codes.DeadlineExceeded, err.Error())
- }
- if errors.Is(err, context.Canceled) {
- return New(codes.Canceled, err.Error())
- }
- return New(codes.Unknown, err.Error())
-}
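And a sketch of FromContextError's mapping, using a context whose deadline has already expired:

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/status"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	<-ctx.Done() // wait for the deadline to pass

	st := status.FromContextError(ctx.Err())
	fmt.Println(st.Code()) // DeadlineExceeded
}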
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
deleted file mode 100644
index 12163150b..000000000
--- a/vendor/google.golang.org/grpc/stream.go
+++ /dev/null
@@ -1,1825 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "errors"
- "io"
- "math"
- rand "math/rand/v2"
- "strconv"
- "sync"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/encoding"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/balancerload"
- "google.golang.org/grpc/internal/binarylog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcutil"
- imetadata "google.golang.org/grpc/internal/metadata"
- iresolver "google.golang.org/grpc/internal/resolver"
- "google.golang.org/grpc/internal/serviceconfig"
- istatus "google.golang.org/grpc/internal/status"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/mem"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
-
-// StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC.
-//
-// If a StreamHandler returns an error, it should either be produced by the
-// status package, or be one of the context errors. Otherwise, gRPC will use
-// codes.Unknown as the status code and err.Error() as the status message of the
-// RPC.
-type StreamHandler func(srv any, stream ServerStream) error
-
-// StreamDesc represents a streaming RPC service's method specification. Used
-// on the server when registering services and on the client when initiating
-// new streams.
-type StreamDesc struct {
- // StreamName and Handler are only used when registering handlers on a
- // server.
- StreamName string // the name of the method excluding the service
- Handler StreamHandler // the handler called for the method
-
- // ServerStreams and ClientStreams are used for registering handlers on a
- // server as well as defining RPC behavior when passed to NewClientStream
- // and ClientConn.NewStream. At least one must be true.
- ServerStreams bool // indicates the server can perform streaming sends
- ClientStreams bool // indicates the client can perform streaming sends
-}
-
-// Stream defines the common interface a client or server stream has to satisfy.
-//
-// Deprecated: See ClientStream and ServerStream documentation instead.
-type Stream interface {
- // Deprecated: See ClientStream and ServerStream documentation instead.
- Context() context.Context
- // Deprecated: See ClientStream and ServerStream documentation instead.
- SendMsg(m any) error
- // Deprecated: See ClientStream and ServerStream documentation instead.
- RecvMsg(m any) error
-}
-
-// ClientStream defines the client-side behavior of a streaming RPC.
-//
-// All errors returned from ClientStream methods are compatible with the
-// status package.
-type ClientStream interface {
- // Header returns the header metadata received from the server if there
- // is any. It blocks if the metadata is not ready to read. If the metadata
- // is nil and the error is also nil, then the stream was terminated without
- // headers, and the status can be discovered by calling RecvMsg.
- Header() (metadata.MD, error)
- // Trailer returns the trailer metadata from the server, if there is any.
- // It must only be called after stream.CloseAndRecv has returned, or
- // stream.Recv has returned a non-nil error (including io.EOF).
- Trailer() metadata.MD
- // CloseSend closes the send direction of the stream. If it encounters a
- // non-nil error, the stream is closed as well. It is not safe to call
- // CloseSend concurrently with SendMsg.
- CloseSend() error
- // Context returns the context for this stream.
- //
- // It should not be called until after Header or RecvMsg has returned. Once
- // called, subsequent client-side retries are disabled.
- Context() context.Context
- // SendMsg is generally called by generated code. On error, SendMsg aborts
- // the stream. If the error was generated by the client, the status is
- // returned directly; otherwise, io.EOF is returned and the status of
- // the stream may be discovered using RecvMsg. For unary or server-streaming
- // RPCs (StreamDesc.ClientStreams is false), a nil error is returned
- // unconditionally.
- //
- // SendMsg blocks until:
- // - There is sufficient flow control to schedule m with the transport, or
- // - The stream is done, or
- // - The stream breaks.
- //
- // SendMsg does not wait until the message is received by the server. An
- // untimely stream closure may result in lost messages. To ensure delivery,
- // users should ensure the RPC completed successfully using RecvMsg.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not safe
- // to call SendMsg on the same stream in different goroutines. It is also
- // not safe to call CloseSend concurrently with SendMsg.
- //
- // It is not safe to modify the message after calling SendMsg. Tracing
- // libraries and stats handlers may use the message lazily.
- SendMsg(m any) error
- // RecvMsg blocks until it receives a message into m or the stream is
- // done. It returns io.EOF when the stream completes successfully. On
- // any other error, the stream is aborted and the error contains the RPC
- // status.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not
- // safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m any) error
-}
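A hedged sketch of the concurrency contract spelled out in SendMsg and RecvMsg above: one goroutine may send while another receives on the same stream, but neither method may be called from two goroutines at once. The message type (emptypb.Empty) is a stand-in for whatever the RPC actually exchanges:

package streamdemo

import (
	"io"
	"sync"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

// pump sends n messages from one goroutine while the calling goroutine
// receives, which the contract above permits.
func pump(cs grpc.ClientStream, n int) error {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < n; i++ {
			if err := cs.SendMsg(&emptypb.Empty{}); err != nil {
				return // the real error will surface via RecvMsg
			}
		}
		cs.CloseSend() // safe here: SendMsg is no longer being called
	}()
	var err error
	for {
		var m emptypb.Empty
		if err = cs.RecvMsg(&m); err != nil {
			break
		}
	}
	wg.Wait()
	if err == io.EOF {
		return nil // stream completed successfully
	}
	return err
}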
-
-// NewStream creates a new Stream for the client side. This is typically
-// called by generated code. ctx is used for the lifetime of the stream.
-//
-// To ensure resources are not leaked due to the stream returned, one of the following
-// actions must be performed:
-//
-// 1. Call Close on the ClientConn.
-// 2. Cancel the context provided.
-// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
-// client-streaming RPC, for instance, might use the helper function
-// CloseAndRecv (note that CloseSend does not Recv, and is therefore not
-// guaranteed to release all resources).
-// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
-//
-// If none of the above happen, a goroutine and a context will be leaked, and grpc
-// will not call the optionally-configured stats handler with a stats.End message.
-func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- // allow interceptor to see all applicable call options, which means those
- // configured as defaults from dial option as well as per-call options
- opts = combine(cc.dopts.callOptions, opts)
-
- if cc.dopts.streamInt != nil {
- return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
- }
- return newClientStream(ctx, desc, cc, method, opts...)
-}
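A hedged sketch of rule 3 from the comment above, on the raw NewStream API: after creating the stream, keep calling RecvMsg until it returns a non-nil error (io.EOF on success), so the goroutine and context mentioned above are released. The method name and message type are placeholders, not a real service:

package streamdemo

import (
	"context"
	"io"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

// watch issues a hypothetical server-streaming RPC and drains it to completion.
func watch(ctx context.Context, cc *grpc.ClientConn) error {
	desc := &grpc.StreamDesc{ServerStreams: true}
	cs, err := cc.NewStream(ctx, desc, "/demo.Service/Watch") // placeholder method
	if err != nil {
		return err
	}
	if err := cs.SendMsg(&emptypb.Empty{}); err != nil && err != io.EOF {
		return err // rule 4: a non-nil, non-io.EOF error also cleans up
	}
	// On io.EOF from SendMsg the real status is surfaced by RecvMsg below.
	if err := cs.CloseSend(); err != nil {
		return err
	}
	for {
		var resp emptypb.Empty
		if err := cs.RecvMsg(&resp); err != nil {
			if err == io.EOF {
				return nil // clean end of stream; resources released
			}
			return err // a non-nil error releases resources as well
		}
	}
}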
-
-// NewClientStream is a wrapper for ClientConn.NewStream.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
- return cc.NewStream(ctx, desc, method, opts...)
-}
-
-func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
- // Start tracking the RPC for idleness purposes. This is where a stream is
- // created for both streaming and unary RPCs, and hence is a good place to
- // track active RPC count.
- if err := cc.idlenessMgr.OnCallBegin(); err != nil {
- return nil, err
- }
- // Add a call option that decrements the active call count when the RPC
- // completes.
- opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
-
- if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
- // validate md
- if err := imetadata.Validate(md); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- // validate added
- for _, kvs := range added {
- for i := 0; i < len(kvs); i += 2 {
- if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- }
- }
- }
- if channelz.IsOn() {
- cc.incrCallsStarted()
- defer func() {
- if err != nil {
- cc.incrCallsFailed()
- }
- }()
- }
- // Provide an opportunity for the first RPC to see the first service config
- // provided by the resolver.
- if err := cc.waitForResolvedAddrs(ctx); err != nil {
- return nil, err
- }
-
- var mc serviceconfig.MethodConfig
- var onCommit func()
- newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
- return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
- }
-
- rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
- rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
- if err != nil {
- if st, ok := status.FromError(err); ok {
- // Restrict the code to the list allowed by gRFC A54.
- if istatus.IsRestrictedControlPlaneCode(st) {
- err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
- }
- return nil, err
- }
- return nil, toRPCErr(err)
- }
-
- if rpcConfig != nil {
- if rpcConfig.Context != nil {
- ctx = rpcConfig.Context
- }
- mc = rpcConfig.MethodConfig
- onCommit = rpcConfig.OnCommitted
- if rpcConfig.Interceptor != nil {
- rpcInfo.Context = nil
- ns := newStream
- newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
- cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
- if err != nil {
- return nil, toRPCErr(err)
- }
- return cs, nil
- }
- }
- }
-
- return newStream(ctx, func() {})
-}
-
-func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
- callInfo := defaultCallInfo()
- if mc.WaitForReady != nil {
- callInfo.failFast = !*mc.WaitForReady
- }
-
- // Possible context leak:
- // The cancel function for the child context we create will only be called
- // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
- // an error is generated by SendMsg.
- // https://github.com/grpc/grpc-go/issues/1818.
- var cancel context.CancelFunc
- if mc.Timeout != nil && *mc.Timeout >= 0 {
- ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
- defer func() {
- if err != nil {
- cancel()
- }
- }()
-
- for _, o := range opts {
- if err := o.before(callInfo); err != nil {
- return nil, toRPCErr(err)
- }
- }
- callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize)
- callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- if err := setCallInfoCodec(callInfo); err != nil {
- return nil, err
- }
-
- callHdr := &transport.CallHdr{
- Host: cc.authority,
- Method: method,
- ContentSubtype: callInfo.contentSubtype,
- DoneFunc: doneFunc,
- }
-
- // Set our outgoing compression according to the UseCompressor CallOption, if
- // set. In that case, also find the compressor from the encoding package.
- // Otherwise, use the compressor configured by the WithCompressor DialOption,
- // if set.
- var compressorV0 Compressor
- var compressorV1 encoding.Compressor
- if ct := callInfo.compressorName; ct != "" {
- callHdr.SendCompress = ct
- if ct != encoding.Identity {
- compressorV1 = encoding.GetCompressor(ct)
- if compressorV1 == nil {
- return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
- }
- }
- } else if cc.dopts.compressorV0 != nil {
- callHdr.SendCompress = cc.dopts.compressorV0.Type()
- compressorV0 = cc.dopts.compressorV0
- }
- if callInfo.creds != nil {
- callHdr.Creds = callInfo.creds
- }
-
- cs := &clientStream{
- callHdr: callHdr,
- ctx: ctx,
- methodConfig: &mc,
- opts: opts,
- callInfo: callInfo,
- cc: cc,
- desc: desc,
- codec: callInfo.codec,
- compressorV0: compressorV0,
- compressorV1: compressorV1,
- cancel: cancel,
- firstAttempt: true,
- onCommit: onCommit,
- }
- if !cc.dopts.disableRetry {
- cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
- }
- if ml := binarylog.GetMethodLogger(method); ml != nil {
- cs.binlogs = append(cs.binlogs, ml)
- }
- if cc.dopts.binaryLogger != nil {
- if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
- cs.binlogs = append(cs.binlogs, ml)
- }
- }
-
- // Pick the transport to use and create a new stream on the transport.
- // Assign cs.attempt upon success.
- op := func(a *csAttempt) error {
- if err := a.getTransport(); err != nil {
- return err
- }
- if err := a.newStream(); err != nil {
- return err
- }
- // Because this operation is always called either here (while creating
- // the clientStream) or by the retry code while locked when replaying
- // the operation, it is safe to access cs.attempt directly.
- cs.attempt = a
- return nil
- }
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
- return nil, err
- }
-
- if len(cs.binlogs) != 0 {
- md, _ := metadata.FromOutgoingContext(ctx)
- logEntry := &binarylog.ClientHeader{
- OnClientSide: true,
- Header: md,
- MethodName: method,
- Authority: cs.cc.authority,
- }
- if deadline, ok := ctx.Deadline(); ok {
- logEntry.Timeout = time.Until(deadline)
- if logEntry.Timeout < 0 {
- logEntry.Timeout = 0
- }
- }
- for _, binlog := range cs.binlogs {
- binlog.Log(cs.ctx, logEntry)
- }
- }
-
- if desc != unaryStreamDesc {
- // Listen on cc and stream contexts to cleanup when the user closes the
- // ClientConn or cancels the stream context. In all other cases, an error
- // should already be injected into the recv buffer by the transport, which
- // the client will eventually receive, and then we will cancel the stream's
- // context in clientStream.finish.
- go func() {
- select {
- case <-cc.ctx.Done():
- cs.finish(ErrClientConnClosing)
- case <-ctx.Done():
- cs.finish(toRPCErr(ctx.Err()))
- }
- }()
- }
- return cs, nil
-}
-
-// newAttemptLocked creates a new csAttempt without a transport or stream.
-func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
- if err := cs.ctx.Err(); err != nil {
- return nil, toRPCErr(err)
- }
- if err := cs.cc.ctx.Err(); err != nil {
- return nil, ErrClientConnClosing
- }
-
- ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
- method := cs.callHdr.Method
- var beginTime time.Time
- shs := cs.cc.dopts.copts.StatsHandlers
- for _, sh := range shs {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
- beginTime = time.Now()
- begin := &stats.Begin{
- Client: true,
- BeginTime: beginTime,
- FailFast: cs.callInfo.failFast,
- IsClientStream: cs.desc.ClientStreams,
- IsServerStream: cs.desc.ServerStreams,
- IsTransparentRetryAttempt: isTransparent,
- }
- sh.HandleRPC(ctx, begin)
- }
-
- var trInfo *traceInfo
- if EnableTracing {
- trInfo = &traceInfo{
- tr: newTrace("grpc.Sent."+methodFamily(method), method),
- firstLine: firstLine{
- client: true,
- },
- }
- if deadline, ok := ctx.Deadline(); ok {
- trInfo.firstLine.deadline = time.Until(deadline)
- }
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- ctx = newTraceContext(ctx, trInfo.tr)
- }
-
- if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
- // Add extra metadata (metadata that will be added by transport) to context
- // so the balancer can see them.
- ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
- "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
- ))
- }
-
- return &csAttempt{
- ctx: ctx,
- beginTime: beginTime,
- cs: cs,
- decompressorV0: cs.cc.dopts.dc,
- statsHandlers: shs,
- trInfo: trInfo,
- }, nil
-}
-
-func (a *csAttempt) getTransport() error {
- cs := a.cs
-
- var err error
- a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
- if err != nil {
- if de, ok := err.(dropError); ok {
- err = de.error
- a.drop = true
- }
- return err
- }
- if a.trInfo != nil {
- a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
- }
- return nil
-}
-
-func (a *csAttempt) newStream() error {
- cs := a.cs
- cs.callHdr.PreviousAttempts = cs.numRetries
-
- // Merge metadata stored in PickResult, if any, with existing call metadata.
- // It is safe to overwrite the csAttempt's context here, since all state
- // maintained in it is local to the attempt. When the attempt has to be
- // retried, a new instance of csAttempt will be created.
- if a.pickResult.Metadata != nil {
- // We currently do not have a function in the metadata package which
- // merges given metadata with existing metadata in a context. Existing
- // function `AppendToOutgoingContext()` takes a variadic argument of key
- // value pairs.
- //
- // TODO: Make it possible to retrieve key value pairs from metadata.MD
- // in a form passable to AppendToOutgoingContext(), or create a version
- // of AppendToOutgoingContext() that accepts a metadata.MD.
- md, _ := metadata.FromOutgoingContext(a.ctx)
- md = metadata.Join(md, a.pickResult.Metadata)
- a.ctx = metadata.NewOutgoingContext(a.ctx, md)
- }
-
- s, err := a.transport.NewStream(a.ctx, cs.callHdr)
- if err != nil {
- nse, ok := err.(*transport.NewStreamError)
- if !ok {
- // Unexpected.
- return err
- }
-
- if nse.AllowTransparentRetry {
- a.allowTransparentRetry = true
- }
-
- // Unwrap and convert error.
- return toRPCErr(nse.Err)
- }
- a.transportStream = s
- a.ctx = s.Context()
- a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
- return nil
-}
-
-// clientStream implements a client side Stream.
-type clientStream struct {
- callHdr *transport.CallHdr
- opts []CallOption
- callInfo *callInfo
- cc *ClientConn
- desc *StreamDesc
-
- codec baseCodec
- compressorV0 Compressor
- compressorV1 encoding.Compressor
-
- cancel context.CancelFunc // cancels all attempts
-
- sentLast bool // sent an end stream
-
- methodConfig *MethodConfig
-
- ctx context.Context // the application's context, wrapped by stats/tracing
-
- retryThrottler *retryThrottler // The throttler active when the RPC began.
-
- binlogs []binarylog.MethodLogger
- // serverHeaderBinlogged records whether the server header has been logged.
- // The server header is logged the first time one of the following happens:
- // stream.Header(), stream.Recv().
- //
- // It's only read and used by Recv() and Header(), so it doesn't need to be
- // synchronized.
- serverHeaderBinlogged bool
-
- mu sync.Mutex
- firstAttempt bool // if true, transparent retry is valid
- numRetries int // exclusive of transparent retry attempt(s)
- numRetriesSincePushback int // retries since pushback; to reset backoff
- finished bool // TODO: replace with atomic cmpxchg or sync.Once?
- // attempt is the active client stream attempt.
- // The only place it is written is the newAttemptLocked method, which never writes nil.
- // So attempt can be nil only inside the newClientStream function, when the clientStream
- // is first created. One of the first things done after the clientStream's creation is to
- // call newAttemptLocked, which either assigns a non-nil value to attempt or returns an
- // error. If newAttemptLocked returns an error, newClientStream calls finish on the
- // clientStream and returns. So the finish method is the only place where we need to
- // check whether attempt is nil.
- attempt *csAttempt
- // TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- onCommit func()
- replayBuffer []replayOp // operations to replay on retry
- replayBufferSize int // current size of replayBuffer
-}
-
-type replayOp struct {
- op func(a *csAttempt) error
- cleanup func()
-}
-
-// csAttempt implements a single transport stream attempt within a
-// clientStream.
-type csAttempt struct {
- ctx context.Context
- cs *clientStream
- transport transport.ClientTransport
- transportStream *transport.ClientStream
- parser *parser
- pickResult balancer.PickResult
-
- finished bool
- decompressorV0 Decompressor
- decompressorV1 encoding.Compressor
- decompressorSet bool
-
- mu sync.Mutex // guards trInfo.tr
- // trInfo may be nil (if EnableTracing is false).
- // trInfo.tr is set when created (if EnableTracing is true),
- // and cleared when the finish method is called.
- trInfo *traceInfo
-
- statsHandlers []stats.Handler
- beginTime time.Time
-
- // set for newStream errors that may be transparently retried
- allowTransparentRetry bool
- // set for pick errors that are returned as a status
- drop bool
-}
-
-func (cs *clientStream) commitAttemptLocked() {
- if !cs.committed && cs.onCommit != nil {
- cs.onCommit()
- }
- cs.committed = true
- for _, op := range cs.replayBuffer {
- if op.cleanup != nil {
- op.cleanup()
- }
- }
- cs.replayBuffer = nil
-}
-
-func (cs *clientStream) commitAttempt() {
- cs.mu.Lock()
- cs.commitAttemptLocked()
- cs.mu.Unlock()
-}
-
-// shouldRetry returns a nil error if the RPC should be retried; otherwise it
-// returns the error that should be returned by the operation. If the RPC
-// should be retried, the bool indicates whether the retry is transparent.
-func (a *csAttempt) shouldRetry(err error) (bool, error) {
- cs := a.cs
-
- if cs.finished || cs.committed || a.drop {
- // RPC is finished or committed or was dropped by the picker; cannot retry.
- return false, err
- }
- if a.transportStream == nil && a.allowTransparentRetry {
- return true, nil
- }
- // Wait for the trailers.
- unprocessed := false
- if a.transportStream != nil {
- <-a.transportStream.Done()
- unprocessed = a.transportStream.Unprocessed()
- }
- if cs.firstAttempt && unprocessed {
- // First attempt, stream unprocessed: transparently retry.
- return true, nil
- }
- if cs.cc.dopts.disableRetry {
- return false, err
- }
-
- pushback := 0
- hasPushback := false
- if a.transportStream != nil {
- if !a.transportStream.TrailersOnly() {
- return false, err
- }
-
- // TODO(retry): Move down if the spec changes to not check server pushback
- // before considering this a failure for throttling.
- sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"]
- if len(sps) == 1 {
- var e error
- if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
- channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0])
- cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return false, err
- }
- hasPushback = true
- } else if len(sps) > 1 {
- channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps)
- cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return false, err
- }
- }
-
- var code codes.Code
- if a.transportStream != nil {
- code = a.transportStream.Status().Code()
- } else {
- code = status.Code(err)
- }
-
- rp := cs.methodConfig.RetryPolicy
- if rp == nil || !rp.RetryableStatusCodes[code] {
- return false, err
- }
-
- // Note: the ordering here is important; we count this as a failure
- // only if the code matched a retryable code.
- if cs.retryThrottler.throttle() {
- return false, err
- }
- if cs.numRetries+1 >= rp.MaxAttempts {
- return false, err
- }
-
- var dur time.Duration
- if hasPushback {
- dur = time.Millisecond * time.Duration(pushback)
- cs.numRetriesSincePushback = 0
- } else {
- fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
- cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff))
- // Apply jitter by multiplying with a random factor between 0.8 and 1.2
- cur *= 0.8 + 0.4*rand.Float64()
- dur = time.Duration(int64(cur))
- cs.numRetriesSincePushback++
- }
-
- // TODO(dfawley): we could eagerly fail here if dur puts us past the
- // deadline, but unsure if it is worth doing.
- t := time.NewTimer(dur)
- select {
- case <-t.C:
- cs.numRetries++
- return false, nil
- case <-cs.ctx.Done():
- t.Stop()
- return false, status.FromContextError(cs.ctx.Err()).Err()
- }
-}
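The unexported backoff arithmetic above is compact; a self-contained sketch of the same computation (with illustrative parameter values, not defaults from any service config) makes the grow-cap-jitter sequence easier to see:

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// backoff reproduces the delay computation above: exponential growth from
// initial by multiplier, capped at max, then scaled by a random factor in
// [0.8, 1.2) for jitter.
func backoff(initial, max time.Duration, multiplier float64, retriesSincePushback int) time.Duration {
	cur := math.Min(float64(initial)*math.Pow(multiplier, float64(retriesSincePushback)), float64(max))
	cur *= 0.8 + 0.4*rand.Float64()
	return time.Duration(int64(cur))
}

func main() {
	for n := 0; n < 5; n++ {
		fmt.Printf("retry %d: %v\n", n, backoff(100*time.Millisecond, 2*time.Second, 2, n))
	}
}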
-
-// Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
- for {
- attempt.finish(toRPCErr(lastErr))
- isTransparent, err := attempt.shouldRetry(lastErr)
- if err != nil {
- cs.commitAttemptLocked()
- return err
- }
- cs.firstAttempt = false
- attempt, err = cs.newAttemptLocked(isTransparent)
- if err != nil {
- // Only returns error if the clientconn is closed or the context of
- // the stream is canceled.
- return err
- }
- // Note that the first op in replayBuffer always sets cs.attempt
- // if it is able to pick a transport and create a stream.
- if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
- return nil
- }
- }
-}
-
-func (cs *clientStream) Context() context.Context {
- cs.commitAttempt()
- // No need to lock before using attempt, since we know it is committed and
- // cannot change.
- if cs.attempt.transportStream != nil {
- return cs.attempt.transportStream.Context()
- }
- return cs.ctx
-}
-
-func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
- cs.mu.Lock()
- for {
- if cs.committed {
- cs.mu.Unlock()
- // toRPCErr is used in case the error from the attempt comes from
- // NewClientStream, which intentionally doesn't return a status
- // error to allow for further inspection; all other errors should
- // already be status errors.
- return toRPCErr(op(cs.attempt))
- }
- if len(cs.replayBuffer) == 0 {
- // For the first op, which controls creation of the stream and
- // assigns cs.attempt, we need to create a new attempt inline
- // before executing the first op. On subsequent ops, the attempt
- // is created immediately before replaying the ops.
- var err error
- if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
- cs.mu.Unlock()
- cs.finish(err)
- return err
- }
- }
- a := cs.attempt
- cs.mu.Unlock()
- err := op(a)
- cs.mu.Lock()
- if a != cs.attempt {
- // We started another attempt already.
- continue
- }
- if err == io.EOF {
- <-a.transportStream.Done()
- }
- if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) {
- onSuccess()
- cs.mu.Unlock()
- return err
- }
- if err := cs.retryLocked(a, err); err != nil {
- cs.mu.Unlock()
- return err
- }
- }
-}
-
-func (cs *clientStream) Header() (metadata.MD, error) {
- var m metadata.MD
- err := cs.withRetry(func(a *csAttempt) error {
- var err error
- m, err = a.transportStream.Header()
- return toRPCErr(err)
- }, cs.commitAttemptLocked)
-
- if m == nil && err == nil {
- // The stream ended with success. Finish the clientStream.
- err = io.EOF
- }
-
- if err != nil {
- cs.finish(err)
- // Do not return the error. The user should get it by calling Recv().
- return nil, nil
- }
-
- if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
- // Only log if the binary log is on, the header has not been logged, and
- // there are actually headers to log.
- logEntry := &binarylog.ServerHeader{
- OnClientSide: true,
- Header: m,
- PeerAddr: nil,
- }
- if peer, ok := peer.FromContext(cs.Context()); ok {
- logEntry.PeerAddr = peer.Addr
- }
- cs.serverHeaderBinlogged = true
- for _, binlog := range cs.binlogs {
- binlog.Log(cs.ctx, logEntry)
- }
- }
-
- return m, nil
-}
-
-func (cs *clientStream) Trailer() metadata.MD {
- // On RPC failure, we never need to retry, because correct usage requires
- // that RecvMsg() has returned a non-nil error before this function is
- // called; we would have retried earlier if necessary.
- //
- // Commit the attempt anyway, just in case users are not following those
- // directions -- it will prevent races and should not meaningfully impact
- // performance.
- cs.commitAttempt()
- if cs.attempt.transportStream == nil {
- return nil
- }
- return cs.attempt.transportStream.Trailer()
-}
-
-func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
- for _, f := range cs.replayBuffer {
- if err := f.op(attempt); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
- // Note: we still will buffer if retry is disabled (for transparent retries).
- if cs.committed {
- return
- }
- cs.replayBufferSize += sz
- if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
- cs.commitAttemptLocked()
- cleanup()
- return
- }
- cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
-}
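A self-contained sketch of the buffering pattern above, with hypothetical types: operations are recorded until a size cap is hit, at which point the stream commits to the current attempt and stops buffering; a retry replays the recorded ops against a fresh attempt:

package main

import "fmt"

type attempt struct{ id int }

type replayOp struct {
	op      func(*attempt) error
	cleanup func()
}

type stream struct {
	committed bool
	size, cap int
	buf       []replayOp
}

// buffer mirrors bufferForRetryLocked: stop buffering once committed, and
// commit (abandoning retries) when the buffer would exceed the cap.
func (s *stream) buffer(sz int, o replayOp) {
	if s.committed {
		return
	}
	s.size += sz
	if s.size > s.cap {
		s.committed = true
		if o.cleanup != nil {
			o.cleanup()
		}
		return
	}
	s.buf = append(s.buf, o)
}

// replay mirrors replayBufferLocked: run every recorded op on a new attempt.
func (s *stream) replay(a *attempt) error {
	for _, o := range s.buf {
		if err := o.op(a); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	s := &stream{cap: 1024}
	s.buffer(16, replayOp{op: func(a *attempt) error {
		fmt.Println("send replayed on attempt", a.id)
		return nil
	}})
	_ = s.replay(&attempt{id: 2})
}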
-
-func (cs *clientStream) SendMsg(m any) (err error) {
- defer func() {
- if err != nil && err != io.EOF {
- // Call finish on the client stream for errors generated by this SendMsg
- // call, as these indicate problems created by this client. (Transport
- // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
- // error will be returned from RecvMsg eventually in that case, or be
- // retried.)
- cs.finish(err)
- }
- }()
- if cs.sentLast {
- return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
- }
- if !cs.desc.ClientStreams {
- cs.sentLast = true
- }
-
- // load hdr, payload, data
- hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool)
- if err != nil {
- return err
- }
-
- defer func() {
- data.Free()
- // only free payload if compression was performed, and it is therefore a
- // different set of buffers from data.
- if pf.isCompressed() {
- payload.Free()
- }
- }()
-
- dataLen := data.Len()
- payloadLen := payload.Len()
- // TODO(dfawley): should we be checking len(data) instead?
- if payloadLen > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
- }
-
- // always take an extra ref in case data == payload (i.e. when the data isn't
- // compressed). The original ref will always be freed by the deferred free above.
- payload.Ref()
- op := func(a *csAttempt) error {
- return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
- }
-
- // onSuccess is invoked when the op is captured for a subsequent retry. If the
- // stream was established by a previous message and therefore retries are
- // disabled, onSuccess will not be invoked, and the extra payload reference
- // can be freed immediately.
- onSuccessCalled := false
- err = cs.withRetry(op, func() {
- cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
- onSuccessCalled = true
- })
- if !onSuccessCalled {
- payload.Free()
- }
- if len(cs.binlogs) != 0 && err == nil {
- cm := &binarylog.ClientMessage{
- OnClientSide: true,
- Message: data.Materialize(),
- }
- for _, binlog := range cs.binlogs {
- binlog.Log(cs.ctx, cm)
- }
- }
- return err
-}
-
-func (cs *clientStream) RecvMsg(m any) error {
- if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
- // Call Header() to binary log header if it's not already logged.
- cs.Header()
- }
- var recvInfo *payloadInfo
- if len(cs.binlogs) != 0 {
- recvInfo = &payloadInfo{}
- defer recvInfo.free()
- }
- err := cs.withRetry(func(a *csAttempt) error {
- return a.recvMsg(m, recvInfo)
- }, cs.commitAttemptLocked)
- if len(cs.binlogs) != 0 && err == nil {
- sm := &binarylog.ServerMessage{
- OnClientSide: true,
- Message: recvInfo.uncompressedBytes.Materialize(),
- }
- for _, binlog := range cs.binlogs {
- binlog.Log(cs.ctx, sm)
- }
- }
- if err != nil || !cs.desc.ServerStreams {
- // err != nil or non-server-streaming indicates end of stream.
- cs.finish(err)
- }
- return err
-}
-
-func (cs *clientStream) CloseSend() error {
- if cs.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
- return nil
- }
- cs.sentLast = true
- op := func(a *csAttempt) error {
- a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
- // Always return nil; io.EOF is the only error that might make sense
- // instead, but there is no need to signal the client to call RecvMsg
- // as the only use left for the stream after CloseSend is to call
- // RecvMsg. This also matches historical behavior.
- return nil
- }
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
- if len(cs.binlogs) != 0 {
- chc := &binarylog.ClientHalfClose{
- OnClientSide: true,
- }
- for _, binlog := range cs.binlogs {
- binlog.Log(cs.ctx, chc)
- }
- }
- // We never return an error here; see the rationale in the op above.
- return nil
-}
-
-func (cs *clientStream) finish(err error) {
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- cs.mu.Lock()
- if cs.finished {
- cs.mu.Unlock()
- return
- }
- cs.finished = true
- for _, onFinish := range cs.callInfo.onFinish {
- onFinish(err)
- }
- cs.commitAttemptLocked()
- if cs.attempt != nil {
- cs.attempt.finish(err)
- // after functions all rely upon having a stream.
- if cs.attempt.transportStream != nil {
- for _, o := range cs.opts {
- o.after(cs.callInfo, cs.attempt)
- }
- }
- }
-
- cs.mu.Unlock()
- // Only one of cancel or trailer needs to be logged.
- if len(cs.binlogs) != 0 {
- switch err {
- case errContextCanceled, errContextDeadline, ErrClientConnClosing:
- c := &binarylog.Cancel{
- OnClientSide: true,
- }
- for _, binlog := range cs.binlogs {
- binlog.Log(cs.ctx, c)
- }
- default:
- logEntry := &binarylog.ServerTrailer{
- OnClientSide: true,
- Trailer: cs.Trailer(),
- Err: err,
- }
- if peer, ok := peer.FromContext(cs.Context()); ok {
- logEntry.PeerAddr = peer.Addr
- }
- for _, binlog := range cs.binlogs {
- binlog.Log(cs.ctx, logEntry)
- }
- }
- }
- if err == nil {
- cs.retryThrottler.successfulRPC()
- }
- if channelz.IsOn() {
- if err != nil {
- cs.cc.incrCallsFailed()
- } else {
- cs.cc.incrCallsSucceeded()
- }
- }
- cs.cancel()
-}
-
-func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
- cs := a.cs
- if a.trInfo != nil {
- a.mu.Lock()
- if a.trInfo.tr != nil {
- a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- }
- a.mu.Unlock()
- }
- if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
- if !cs.desc.ClientStreams {
- // For non-client-streaming RPCs, we return nil instead of EOF on error
- // because the generated code requires it. finish is not called; RecvMsg()
- // will call it with the stream's status independently.
- return nil
- }
- return io.EOF
- }
- if len(a.statsHandlers) != 0 {
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
- }
- }
- return nil
-}
-
-func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
- cs := a.cs
- if len(a.statsHandlers) != 0 && payInfo == nil {
- payInfo = &payloadInfo{}
- defer payInfo.free()
- }
-
- if !a.decompressorSet {
- // Block until we receive headers containing received message encoding.
- if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
- if a.decompressorV0 == nil || a.decompressorV0.Type() != ct {
- // No configured decompressor, or it does not match the incoming
- // message encoding; attempt to find a registered compressor that does.
- a.decompressorV0 = nil
- a.decompressorV1 = encoding.GetCompressor(ct)
- }
- } else {
- // No compression is used; disable our decompressor.
- a.decompressorV0 = nil
- }
- // Only initialize this state once per stream.
- a.decompressorSet = true
- }
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
- if err == io.EOF {
- if statusErr := a.transportStream.Status().Err(); statusErr != nil {
- return statusErr
- }
- return io.EOF // indicates successful end of stream.
- }
-
- return toRPCErr(err)
- }
- if a.trInfo != nil {
- a.mu.Lock()
- if a.trInfo.tr != nil {
- a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- }
- a.mu.Unlock()
- }
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, &stats.InPayload{
- Client: true,
- RecvTime: time.Now(),
- Payload: m,
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- Length: payInfo.uncompressedBytes.Len(),
- })
- }
- if cs.desc.ServerStreams {
- // Subsequent messages should be received by subsequent RecvMsg calls.
- return nil
- }
- // Special handling for non-server-stream rpcs.
- // This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
- return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
- } else if err != nil {
- return toRPCErr(err)
- }
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-}
-
-func (a *csAttempt) finish(err error) {
- a.mu.Lock()
- if a.finished {
- a.mu.Unlock()
- return
- }
- a.finished = true
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- var tr metadata.MD
- if a.transportStream != nil {
- a.transportStream.Close(err)
- tr = a.transportStream.Trailer()
- }
-
- if a.pickResult.Done != nil {
- br := false
- if a.transportStream != nil {
- br = a.transportStream.BytesReceived()
- }
- a.pickResult.Done(balancer.DoneInfo{
- Err: err,
- Trailer: tr,
- BytesSent: a.transportStream != nil,
- BytesReceived: br,
- ServerLoad: balancerload.Parse(tr),
- })
- }
- for _, sh := range a.statsHandlers {
- end := &stats.End{
- Client: true,
- BeginTime: a.beginTime,
- EndTime: time.Now(),
- Trailer: tr,
- Error: err,
- }
- sh.HandleRPC(a.ctx, end)
- }
- if a.trInfo != nil && a.trInfo.tr != nil {
- if err == nil {
- a.trInfo.tr.LazyPrintf("RPC: [OK]")
- } else {
- a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
- a.trInfo.tr.SetError()
- }
- a.trInfo.tr.Finish()
- a.trInfo.tr = nil
- }
- a.mu.Unlock()
-}
-
-// newNonRetryClientStream creates a ClientStream with the specified transport, on the
-// given addrConn.
-//
-// It's expected that the given transport is either the same one in addrConn, or
-// is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transport.
-//
-// Main difference between this and ClientConn.NewStream:
-// - no retry
-// - no service config (or wait for service config)
-// - no tracing or stats
-func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
- if t == nil {
- // TODO: return RPC error here?
- return nil, errors.New("transport provided is nil")
- }
- // defaultCallInfo contains unnecessary info (e.g. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
- c := &callInfo{}
-
- // Possible context leak:
- // The cancel function for the child context we create will only be called
- // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
- // an error is generated by SendMsg.
- // https://github.com/grpc/grpc-go/issues/1818.
- ctx, cancel := context.WithCancel(ctx)
- defer func() {
- if err != nil {
- cancel()
- }
- }()
-
- for _, o := range opts {
- if err := o.before(c); err != nil {
- return nil, toRPCErr(err)
- }
- }
- c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
- if err := setCallInfoCodec(c); err != nil {
- return nil, err
- }
-
- callHdr := &transport.CallHdr{
- Host: ac.cc.authority,
- Method: method,
- ContentSubtype: c.contentSubtype,
- }
-
- // Set our outgoing compression according to the UseCompressor CallOption, if
- // set. In that case, also find the compressor from the encoding package.
- // Otherwise, use the compressor configured by the WithCompressor DialOption,
- // if set.
- var cp Compressor
- var comp encoding.Compressor
- if ct := c.compressorName; ct != "" {
- callHdr.SendCompress = ct
- if ct != encoding.Identity {
- comp = encoding.GetCompressor(ct)
- if comp == nil {
- return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
- }
- }
- } else if ac.cc.dopts.compressorV0 != nil {
- callHdr.SendCompress = ac.cc.dopts.compressorV0.Type()
- cp = ac.cc.dopts.compressorV0
- }
- if c.creds != nil {
- callHdr.Creds = c.creds
- }
-
- // Use a special addrConnStream to avoid retry.
- as := &addrConnStream{
- callHdr: callHdr,
- ac: ac,
- ctx: ctx,
- cancel: cancel,
- opts: opts,
- callInfo: c,
- desc: desc,
- codec: c.codec,
- sendCompressorV0: cp,
- sendCompressorV1: comp,
- transport: t,
- }
-
- s, err := as.transport.NewStream(as.ctx, as.callHdr)
- if err != nil {
- err = toRPCErr(err)
- return nil, err
- }
- as.transportStream = s
- as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
- ac.incrCallsStarted()
- if desc != unaryStreamDesc {
- // Listen on stream context to cleanup when the stream context is
- // canceled. Also listen for the addrConn's context in case the
- // addrConn is closed or reconnects to a different address. In all
- // other cases, an error should already be injected into the recv
- // buffer by the transport, which the client will eventually receive,
- // and then we will cancel the stream's context in
- // addrConnStream.finish.
- go func() {
- ac.mu.Lock()
- acCtx := ac.ctx
- ac.mu.Unlock()
- select {
- case <-acCtx.Done():
- as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
- case <-ctx.Done():
- as.finish(toRPCErr(ctx.Err()))
- }
- }()
- }
- return as, nil
-}
-
-type addrConnStream struct {
- transportStream *transport.ClientStream
- ac *addrConn
- callHdr *transport.CallHdr
- cancel context.CancelFunc
- opts []CallOption
- callInfo *callInfo
- transport transport.ClientTransport
- ctx context.Context
- sentLast bool
- desc *StreamDesc
- codec baseCodec
- sendCompressorV0 Compressor
- sendCompressorV1 encoding.Compressor
- decompressorSet bool
- decompressorV0 Decompressor
- decompressorV1 encoding.Compressor
- parser *parser
-
- // mu guards finished and is held for the entire finish method.
- mu sync.Mutex
- finished bool
-}
-
-func (as *addrConnStream) Header() (metadata.MD, error) {
- m, err := as.transportStream.Header()
- if err != nil {
- as.finish(toRPCErr(err))
- }
- return m, err
-}
-
-func (as *addrConnStream) Trailer() metadata.MD {
- return as.transportStream.Trailer()
-}
-
-func (as *addrConnStream) CloseSend() error {
- if as.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
- return nil
- }
- as.sentLast = true
-
- as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
- // Always return nil; io.EOF is the only error that might make sense
- // instead, but there is no need to signal the client to call RecvMsg
- // as the only use left for the stream after CloseSend is to call
- // RecvMsg. This also matches historical behavior.
- return nil
-}
-
-func (as *addrConnStream) Context() context.Context {
- return as.transportStream.Context()
-}
-
-func (as *addrConnStream) SendMsg(m any) (err error) {
- defer func() {
- if err != nil && err != io.EOF {
- // Call finish on the client stream for errors generated by this SendMsg
- // call, as these indicate problems created by this client. (Transport
- // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
- // error will be returned from RecvMsg eventually in that case, or be
- // retried.)
- as.finish(err)
- }
- }()
- if as.sentLast {
- return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
- }
- if !as.desc.ClientStreams {
- as.sentLast = true
- }
-
- // load hdr, payload, data
- hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool)
- if err != nil {
- return err
- }
-
- defer func() {
- data.Free()
- // only free payload if compression was performed, and it is therefore a
- // different set of buffers from data.
- if pf.isCompressed() {
- payload.Free()
- }
- }()
-
- // TODO(dfawley): should we be checking len(data) instead?
- if payload.Len() > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
- }
-
- if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
- if !as.desc.ClientStreams {
- // For non-client-streaming RPCs, we return nil instead of EOF on error
- // because the generated code requires it. finish is not called; RecvMsg()
- // will call it with the stream's status independently.
- return nil
- }
- return io.EOF
- }
-
- return nil
-}
-
-func (as *addrConnStream) RecvMsg(m any) (err error) {
- defer func() {
- if err != nil || !as.desc.ServerStreams {
- // err != nil or non-server-streaming indicates end of stream.
- as.finish(err)
- }
- }()
-
- if !as.decompressorSet {
- // Block until we receive headers containing received message encoding.
- if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
- if as.decompressorV0 == nil || as.decompressorV0.Type() != ct {
- // No configured decompressor, or it does not match the incoming
- // message encoding; attempt to find a registered compressor that does.
- as.decompressorV0 = nil
- as.decompressorV1 = encoding.GetCompressor(ct)
- }
- } else {
- // No compression is used; disable our decompressor.
- as.decompressorV0 = nil
- }
- // Only initialize this state once per stream.
- as.decompressorSet = true
- }
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
- if err == io.EOF {
- if statusErr := as.transportStream.Status().Err(); statusErr != nil {
- return statusErr
- }
- return io.EOF // indicates successful end of stream.
- }
- return toRPCErr(err)
- }
-
- if as.desc.ServerStreams {
- // Subsequent messages should be received by subsequent RecvMsg calls.
- return nil
- }
-
- // Special handling for non-server-stream rpcs.
- // This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
- return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
- } else if err != nil {
- return toRPCErr(err)
- }
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-}
-
-func (as *addrConnStream) finish(err error) {
- as.mu.Lock()
- if as.finished {
- as.mu.Unlock()
- return
- }
- as.finished = true
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- if as.transportStream != nil {
- as.transportStream.Close(err)
- }
-
- if err != nil {
- as.ac.incrCallsFailed()
- } else {
- as.ac.incrCallsSucceeded()
- }
- as.cancel()
- as.mu.Unlock()
-}
-
-// ServerStream defines the server-side behavior of a streaming RPC.
-//
-// Errors returned from ServerStream methods are compatible with the status
-// package. However, the status code will often not match the RPC status as
-// seen by the client application, and therefore, should not be relied upon for
-// this purpose.
-type ServerStream interface {
- // SetHeader sets the header metadata. It may be called multiple times.
- // When called multiple times, all the provided metadata will be merged.
- // All the metadata will be sent out when one of the following happens:
- // - ServerStream.SendHeader() is called;
- // - The first response is sent out;
- // - An RPC status is sent out (error or success).
- SetHeader(metadata.MD) error
- // SendHeader sends the header metadata.
- // The provided md and headers set by SetHeader() will be sent.
- // It fails if called multiple times.
- SendHeader(metadata.MD) error
- // SetTrailer sets the trailer metadata which will be sent with the RPC status.
- // When called more than once, all the provided metadata will be merged.
- SetTrailer(metadata.MD)
- // Context returns the context for this stream.
- Context() context.Context
- // SendMsg sends a message. On error, SendMsg aborts the stream and the
- // error is returned directly.
- //
- // SendMsg blocks until:
- // - There is sufficient flow control to schedule m with the transport, or
- // - The stream is done, or
- // - The stream breaks.
- //
- // SendMsg does not wait until the message is received by the client. An
- // untimely stream closure may result in lost messages.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not safe
- // to call SendMsg on the same stream in different goroutines.
- //
- // It is not safe to modify the message after calling SendMsg. Tracing
- // libraries and stats handlers may use the message lazily.
- SendMsg(m any) error
- // RecvMsg blocks until it receives a message into m or the stream is
- // done. It returns io.EOF when the client has performed a CloseSend. On
- // any non-EOF error, the stream is aborted and the error contains the
- // RPC status.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not
- // safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m any) error
-}
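A hedged handler sketch against the raw ServerStream interface above: headers are staged with SetHeader before the first send, RecvMsg returning io.EOF signals the client's CloseSend, and the trailer rides out with the final status. The header keys and message type are placeholders:

package streamdemo

import (
	"io"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/protobuf/types/known/emptypb"
)

// echo receives until the client half-closes and sends each message back.
func echo(stream grpc.ServerStream) error {
	if err := stream.SetHeader(metadata.Pairs("x-demo", "1")); err != nil {
		return err // sent with the first response, per the contract above
	}
	defer stream.SetTrailer(metadata.Pairs("x-demo-done", "1"))
	for {
		var msg emptypb.Empty
		if err := stream.RecvMsg(&msg); err != nil {
			if err == io.EOF {
				return nil // client called CloseSend; finish with OK
			}
			return err
		}
		if err := stream.SendMsg(&msg); err != nil {
			return err
		}
	}
}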
-
-// serverStream implements a server side Stream.
-type serverStream struct {
- ctx context.Context
- s *transport.ServerStream
- p *parser
- codec baseCodec
-
- compressorV0 Compressor
- compressorV1 encoding.Compressor
- decompressorV0 Decompressor
- decompressorV1 encoding.Compressor
-
- sendCompressorName string
-
- maxReceiveMessageSize int
- maxSendMessageSize int
- trInfo *traceInfo
-
- statsHandler []stats.Handler
-
- binlogs []binarylog.MethodLogger
- // serverHeaderBinlogged indicates whether the server header has been logged.
- // It is set the first time one of the following happens: stream.SendHeader(),
- // stream.Send().
- //
- // It's only checked in send and sendHeader, doesn't need to be
- // synchronized.
- serverHeaderBinlogged bool
-
- mu sync.Mutex // protects trInfo.tr after the service handler runs.
-}
-
-func (ss *serverStream) Context() context.Context {
- return ss.ctx
-}
-
-func (ss *serverStream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- err := imetadata.Validate(md)
- if err != nil {
- return status.Error(codes.Internal, err.Error())
- }
- return ss.s.SetHeader(md)
-}
-
-func (ss *serverStream) SendHeader(md metadata.MD) error {
- err := imetadata.Validate(md)
- if err != nil {
- return status.Error(codes.Internal, err.Error())
- }
-
- err = ss.s.SendHeader(md)
- if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
- h, _ := ss.s.Header()
- sh := &binarylog.ServerHeader{
- Header: h,
- }
- ss.serverHeaderBinlogged = true
- for _, binlog := range ss.binlogs {
- binlog.Log(ss.ctx, sh)
- }
- }
- return err
-}
-
-func (ss *serverStream) SetTrailer(md metadata.MD) {
- if md.Len() == 0 {
- return
- }
- if err := imetadata.Validate(md); err != nil {
- logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
- }
- ss.s.SetTrailer(md)
-}
-
-func (ss *serverStream) SendMsg(m any) (err error) {
- defer func() {
- if ss.trInfo != nil {
- ss.mu.Lock()
- if ss.trInfo.tr != nil {
- if err == nil {
- ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- } else {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- ss.trInfo.tr.SetError()
- }
- }
- ss.mu.Unlock()
- }
- if err != nil && err != io.EOF {
- st, _ := status.FromError(toRPCErr(err))
- ss.s.WriteStatus(st)
-			// A non-user-specified status was sent out. This should be an
-			// error case (e.g., a server-side cancellation).
-			//
-			// This is not handled specifically now. The user will return a
-			// final status from the service handler, and we will log that
-			// error instead. This behavior is similar to an interceptor.
- }
- }()
-
- // Server handler could have set new compressor by calling SetSendCompressor.
- // In case it is set, we need to use it for compressing outbound message.
- if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
- ss.compressorV1 = encoding.GetCompressor(sendCompressorsName)
- ss.sendCompressorName = sendCompressorsName
- }
-
-	// Marshal and optionally compress the message into hdr, payload, and data.
- hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool)
- if err != nil {
- return err
- }
-
- defer func() {
- data.Free()
-		// Only free payload if compression was performed, in which case it is
-		// a different set of buffers from data.
- if pf.isCompressed() {
- payload.Free()
- }
- }()
-
- dataLen := data.Len()
- payloadLen := payload.Len()
-
- // TODO(dfawley): should we be checking len(data) instead?
- if payloadLen > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
- }
- if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil {
- return toRPCErr(err)
- }
-
- if len(ss.binlogs) != 0 {
- if !ss.serverHeaderBinlogged {
- h, _ := ss.s.Header()
- sh := &binarylog.ServerHeader{
- Header: h,
- }
- ss.serverHeaderBinlogged = true
- for _, binlog := range ss.binlogs {
- binlog.Log(ss.ctx, sh)
- }
- }
- sm := &binarylog.ServerMessage{
- Message: data.Materialize(),
- }
- for _, binlog := range ss.binlogs {
- binlog.Log(ss.ctx, sm)
- }
- }
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
- }
- }
- return nil
-}
-
-func (ss *serverStream) RecvMsg(m any) (err error) {
- defer func() {
- if ss.trInfo != nil {
- ss.mu.Lock()
- if ss.trInfo.tr != nil {
- if err == nil {
- ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- } else if err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
- ss.trInfo.tr.SetError()
- }
- }
- ss.mu.Unlock()
- }
- if err != nil && err != io.EOF {
- st, _ := status.FromError(toRPCErr(err))
- ss.s.WriteStatus(st)
-			// A non-user-specified status was sent out. This should be an
-			// error case (e.g., a server-side cancellation).
-			//
-			// This is not handled specifically now. The user will return a
-			// final status from the service handler, and we will log that
-			// error instead. This behavior is similar to an interceptor.
- }
- }()
- var payInfo *payloadInfo
- if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
- payInfo = &payloadInfo{}
- defer payInfo.free()
- }
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
- if err == io.EOF {
- if len(ss.binlogs) != 0 {
- chc := &binarylog.ClientHalfClose{}
- for _, binlog := range ss.binlogs {
- binlog.Log(ss.ctx, chc)
- }
- }
- return err
- }
- if err == io.ErrUnexpectedEOF {
- err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
- }
- return toRPCErr(err)
- }
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- Length: payInfo.uncompressedBytes.Len(),
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- })
- }
- }
- if len(ss.binlogs) != 0 {
- cm := &binarylog.ClientMessage{
- Message: payInfo.uncompressedBytes.Materialize(),
- }
- for _, binlog := range ss.binlogs {
- binlog.Log(ss.ctx, cm)
- }
- }
- return nil
-}
-
-// MethodFromServerStream returns the method string for the input stream.
-// The returned string is in the format of "/service/method".
-func MethodFromServerStream(stream ServerStream) (string, bool) {
- return Method(stream.Context())
-}
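
As an illustration of the above, a minimal sketch of calling MethodFromServerStream from a stream interceptor; the interceptor name and the logging are assumptions for the example, not part of this package.

package example

import (
	"log"

	"google.golang.org/grpc"
)

// loggingStreamInterceptor (hypothetical) logs the full method name of each
// streaming RPC before delegating to the real handler.
func loggingStreamInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	if method, ok := grpc.MethodFromServerStream(ss); ok {
		log.Printf("streaming RPC: %s (client stream: %v)", method, info.IsClientStream)
	}
	return handler(srv, ss)
}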
-
-// prepareMsg returns the hdr, payload and data using the compressors passed or
-// using the passed PreparedMsg. The returned payloadFormat indicates whether
-// compression was performed and therefore whether the payload needs to be
-// freed in addition to the returned data. Freeing the payload when
-// pf.isCompressed() is false can lead to undefined behavior.
-func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
- if preparedMsg, ok := m.(*PreparedMsg); ok {
- return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
- }
- // The input interface is not a prepared msg.
- // Marshal and Compress the data at this point
- data, err = encode(codec, m)
- if err != nil {
- return nil, nil, nil, 0, err
- }
- compData, pf, err := compress(data, cp, comp, pool)
- if err != nil {
- data.Free()
- return nil, nil, nil, 0, err
- }
- hdr, payload = msgHeader(data, compData, pf)
- return hdr, data, payload, pf, nil
-}
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
deleted file mode 100644
index 0037fee0b..000000000
--- a/vendor/google.golang.org/grpc/stream_interfaces.go
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-// ServerStreamingClient represents the client side of a server-streaming (one
-// request, many responses) RPC. It is generic over the type of the response
-// message. It is used in generated code.
-type ServerStreamingClient[Res any] interface {
- // Recv receives the next response message from the server. The client may
- // repeatedly call Recv to read messages from the response stream. If
- // io.EOF is returned, the stream has terminated with an OK status. Any
- // other error is compatible with the status package and indicates the
- // RPC's status code and message.
- Recv() (*Res, error)
-
- // ClientStream is embedded to provide Context, Header, and Trailer
- // functionality. No other methods in the ClientStream should be called
- // directly.
- ClientStream
-}
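
The canonical way to consume this interface is a Recv loop that treats io.EOF as clean termination. A sketch, generic over an assumed message type:

package example

import (
	"io"

	"google.golang.org/grpc"
)

// drain reads every response from a server-streaming RPC; Res stands in for a
// message type from generated code.
func drain[Res any](stream grpc.ServerStreamingClient[Res]) ([]*Res, error) {
	var out []*Res
	for {
		r, err := stream.Recv()
		if err == io.EOF {
			return out, nil // the stream terminated with an OK status
		}
		if err != nil {
			return out, err // a status-package error carrying code and message
		}
		out = append(out, r)
	}
}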
-
-// ServerStreamingServer represents the server side of a server-streaming (one
-// request, many responses) RPC. It is generic over the type of the response
-// message. It is used in generated code.
-//
-// To terminate the response stream, return from the handler method, returning
-// an error from the status package, or nil to indicate an OK status code.
-type ServerStreamingServer[Res any] interface {
- // Send sends a response message to the client. The server handler may
- // call Send multiple times to send multiple messages to the client. An
- // error is returned if the stream was terminated unexpectedly, and the
- // handler method should return, as the stream is no longer usable.
- Send(*Res) error
-
- // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
- // SetTrailer functionality. No other methods in the ServerStream should
- // be called directly.
- ServerStream
-}
-
-// ClientStreamingClient represents the client side of a client-streaming (many
-// requests, one response) RPC. It is generic over both the type of the request
-// message stream and the type of the unary response message. It is used in
-// generated code.
-type ClientStreamingClient[Req any, Res any] interface {
- // Send sends a request message to the server. The client may call Send
- // multiple times to send multiple messages to the server. On error, Send
- // aborts the stream. If the error was generated by the client, the status
- // is returned directly. Otherwise, io.EOF is returned, and the status of
- // the stream may be discovered using CloseAndRecv().
- Send(*Req) error
-
- // CloseAndRecv closes the request stream and waits for the server's
- // response. This method must be called once and only once after sending
- // all request messages. Any error returned is implemented by the status
- // package.
- CloseAndRecv() (*Res, error)
-
- // ClientStream is embedded to provide Context, Header, and Trailer
- // functionality. No other methods in the ClientStream should be called
- // directly.
- ClientStream
-}
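
The mirror-image client pattern, sketched under the same assumptions: send every request, then call CloseAndRecv exactly once, which also surfaces the definitive status after a failed Send.

package example

import "google.golang.org/grpc"

// upload sends every request, then retrieves the unary response; Req and Res
// stand in for generated message types.
func upload[Req, Res any](stream grpc.ClientStreamingClient[Req, Res], reqs []*Req) (*Res, error) {
	for _, r := range reqs {
		if err := stream.Send(r); err != nil {
			// On a broken stream Send may return io.EOF; the definitive
			// status is surfaced by CloseAndRecv below.
			break
		}
	}
	return stream.CloseAndRecv()
}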
-
-// ClientStreamingServer represents the server side of a client-streaming (many
-// requests, one response) RPC. It is generic over both the type of the request
-// message stream and the type of the unary response message. It is used in
-// generated code.
-//
-// To terminate the RPC, call SendAndClose and return nil from the method
-// handler or do not call SendAndClose and return an error from the status
-// package.
-type ClientStreamingServer[Req any, Res any] interface {
- // Recv receives the next request message from the client. The server may
- // repeatedly call Recv to read messages from the request stream. If
- // io.EOF is returned, it indicates the client called CloseAndRecv on its
- // ClientStreamingClient. Any other error indicates the stream was
- // terminated unexpectedly, and the handler method should return, as the
- // stream is no longer usable.
- Recv() (*Req, error)
-
- // SendAndClose sends a single response message to the client and closes
- // the stream. This method must be called once and only once after all
- // request messages have been processed. Recv should not be called after
- // calling SendAndClose.
- SendAndClose(*Res) error
-
- // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
- // SetTrailer functionality. No other methods in the ServerStream should
- // be called directly.
- ServerStream
-}
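
A sketch of the corresponding handler shape: receive until io.EOF, then reply exactly once with SendAndClose. The summarize callback is an assumption standing in for application logic.

package example

import (
	"io"

	"google.golang.org/grpc"
)

// collect receives until the client half-closes, then replies exactly once.
func collect[Req, Res any](stream grpc.ClientStreamingServer[Req, Res], summarize func([]*Req) *Res) error {
	var reqs []*Req
	for {
		r, err := stream.Recv()
		if err == io.EOF {
			// The client called CloseAndRecv: send the single response.
			return stream.SendAndClose(summarize(reqs))
		}
		if err != nil {
			return err // the stream is no longer usable; return promptly
		}
		reqs = append(reqs, r)
	}
}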
-
-// BidiStreamingClient represents the client side of a bidirectional-streaming
-// (many requests, many responses) RPC. It is generic over both the type of the
-// request message stream and the type of the response message stream. It is
-// used in generated code.
-type BidiStreamingClient[Req any, Res any] interface {
- // Send sends a request message to the server. The client may call Send
- // multiple times to send multiple messages to the server. On error, Send
- // aborts the stream. If the error was generated by the client, the status
- // is returned directly. Otherwise, io.EOF is returned, and the status of
- // the stream may be discovered using Recv().
- Send(*Req) error
-
- // Recv receives the next response message from the server. The client may
- // repeatedly call Recv to read messages from the response stream. If
- // io.EOF is returned, the stream has terminated with an OK status. Any
- // other error is compatible with the status package and indicates the
- // RPC's status code and message.
- Recv() (*Res, error)
-
- // ClientStream is embedded to provide Context, Header, Trailer, and
- // CloseSend functionality. No other methods in the ClientStream should be
- // called directly.
- ClientStream
-}
-
-// BidiStreamingServer represents the server side of a bidirectional-streaming
-// (many requests, many responses) RPC. It is generic over both the type of the
-// request message stream and the type of the response message stream. It is
-// used in generated code.
-//
-// To terminate the stream, return from the handler method, returning an error
-// from the status package, or nil to indicate an OK status code.
-type BidiStreamingServer[Req any, Res any] interface {
- // Recv receives the next request message from the client. The server may
- // repeatedly call Recv to read messages from the request stream. If
- // io.EOF is returned, it indicates the client called CloseSend on its
- // BidiStreamingClient. Any other error indicates the stream was
- // terminated unexpectedly, and the handler method should return, as the
- // stream is no longer usable.
- Recv() (*Req, error)
-
- // Send sends a response message to the client. The server handler may
- // call Send multiple times to send multiple messages to the client. An
- // error is returned if the stream was terminated unexpectedly, and the
- // handler method should return, as the stream is no longer usable.
- Send(*Res) error
-
- // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
- // SetTrailer functionality. No other methods in the ServerStream should
- // be called directly.
- ServerStream
-}
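
A sketch of the classic bidirectional echo handler built on this interface; the message type is a placeholder for generated code.

package example

import (
	"io"

	"google.golang.org/grpc"
)

// echo forwards each request back to the client until the client calls
// CloseSend (io.EOF) or the stream breaks.
func echo[M any](stream grpc.BidiStreamingServer[M, M]) error {
	for {
		m, err := stream.Recv()
		if err == io.EOF {
			return nil // end the RPC with an OK status
		}
		if err != nil {
			return err
		}
		if err := stream.Send(m); err != nil {
			return err
		}
	}
}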
-
-// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient,
-// and BidiStreamingClient interfaces. It is used in generated code.
-type GenericClientStream[Req any, Res any] struct {
- ClientStream
-}
-
-var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
-var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
-var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
-
-// Send pushes one message into the stream of requests to be consumed by the
-// server. The type of message which can be sent is determined by the Req type
-// parameter of the GenericClientStream receiver.
-func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
- return x.ClientStream.SendMsg(m)
-}
-
-// Recv reads one message from the stream of responses generated by the server.
-// The type of the message returned is determined by the Res type parameter
-// of the GenericClientStream receiver.
-func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
- m := new(Res)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// CloseAndRecv closes the sending side of the stream, then receives the unary
-// response from the server. The type of message which it returns is determined
-// by the Res type parameter of the GenericClientStream receiver.
-func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- m := new(Res)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
-// and BidiStreamingServer interfaces. It is used in generated code.
-type GenericServerStream[Req any, Res any] struct {
- ServerStream
-}
-
-var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
-var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
-var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
-
-// Send pushes one message into the stream of responses to be consumed by the
-// client. The type of message which can be sent is determined by the Res
-// type parameter of the GenericServerStream receiver.
-func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
- return x.ServerStream.SendMsg(m)
-}
-
-// SendAndClose pushes the unary response to the client. The type of message
-// which can be sent is determined by the Res type parameter of the
-// GenericServerStream receiver.
-func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
- return x.ServerStream.SendMsg(m)
-}
-
-// Recv reads one message from the stream of requests generated by the client.
-// The type of the message returned is determined by the Req type parameter
-// of the GenericServerStream receiver.
-func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
- m := new(Req)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
deleted file mode 100644
index 07f012576..000000000
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package tap defines the function handlers executed on the transport layer of
-// gRPC-Go, and related information.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package tap
-
-import (
- "context"
-
- "google.golang.org/grpc/metadata"
-)
-
-// Info defines the relevant information needed by the handlers.
-type Info struct {
-	// FullMethodName is the full RPC method string, i.e.,
-	// /package.service/method.
- FullMethodName string
-
- // Header contains the header metadata received.
- Header metadata.MD
-
- // TODO: More to be added.
-}
-
-// ServerInHandle defines the function which runs before a new stream is
-// created on the server side. If it returns a non-nil error, the stream will
-// not be created and an error will be returned to the client. If the error
-// returned is a status error, that status code and message will be used,
-// otherwise PermissionDenied will be the code and err.Error() will be the
-// message.
-//
-// It's intended to be used in situations where you don't want to waste the
-// resources to accept the new stream (e.g. rate-limiting). For other general
-// usage, please use interceptors.
-//
-// Note that it is executed in the per-connection I/O goroutine(s) instead of
-// per-RPC goroutine. Therefore, users should NOT have any
-// blocking/time-consuming work in this handle. Otherwise all the RPCs would
-// slow down. Also, for the same reason, this handle won't be called
-// concurrently by gRPC.
-type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
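
A sketch of a non-blocking admission check as described above, installed with grpc.InTapHandle; the limiter and its numbers are illustrative assumptions (using golang.org/x/time/rate).

package example

import (
	"context"

	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// Illustrative numbers: admit at most 100 new streams per second, burst 200.
var limiter = rate.NewLimiter(100, 200)

// rateLimitTap rejects excess streams without blocking, as a ServerInHandle
// must, and leaves the context untouched otherwise.
func rateLimitTap(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !limiter.Allow() {
		return nil, status.Error(codes.ResourceExhausted, "stream rate limit exceeded")
	}
	return ctx, nil
}

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.InTapHandle(rateLimitTap))
}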
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
deleted file mode 100644
index 10f4f798f..000000000
--- a/vendor/google.golang.org/grpc/trace.go
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "fmt"
- "io"
- "net"
- "strings"
- "sync"
- "time"
-)
-
-// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
-// This should only be set before any RPCs are sent or received by this program.
-var EnableTracing bool
-
-// methodFamily returns the trace family for the given method.
-// It turns "/pkg.Service/GetFoo" into "pkg.Service".
-func methodFamily(m string) string {
- m = strings.TrimPrefix(m, "/") // remove leading slash
- if i := strings.Index(m, "/"); i >= 0 {
- m = m[:i] // remove everything from second slash
- }
- return m
-}
-
-// traceEventLog mirrors golang.org/x/net/trace.EventLog.
-//
-// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
-type traceEventLog interface {
- Printf(format string, a ...any)
- Errorf(format string, a ...any)
- Finish()
-}
-
-// traceLog mirrors golang.org/x/net/trace.Trace.
-//
-// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
-type traceLog interface {
- LazyLog(x fmt.Stringer, sensitive bool)
- LazyPrintf(format string, a ...any)
- SetError()
- SetRecycler(f func(any))
- SetTraceInfo(traceID, spanID uint64)
- SetMaxEvents(m int)
- Finish()
-}
-
-// traceInfo contains tracing information for an RPC.
-type traceInfo struct {
- tr traceLog
- firstLine firstLine
-}
-
-// firstLine is the first line of an RPC trace.
-// It may be mutated after construction; remoteAddr specifically may change
-// during client-side use.
-type firstLine struct {
- mu sync.Mutex
- client bool // whether this is a client (outgoing) RPC
- remoteAddr net.Addr
- deadline time.Duration // may be zero
-}
-
-func (f *firstLine) SetRemoteAddr(addr net.Addr) {
- f.mu.Lock()
- f.remoteAddr = addr
- f.mu.Unlock()
-}
-
-func (f *firstLine) String() string {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- var line bytes.Buffer
- io.WriteString(&line, "RPC: ")
- if f.client {
- io.WriteString(&line, "to")
- } else {
- io.WriteString(&line, "from")
- }
- fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
- if f.deadline != 0 {
- fmt.Fprint(&line, f.deadline)
- } else {
- io.WriteString(&line, "none")
- }
- return line.String()
-}
-
-const truncateSize = 100
-
-func truncate(x string, l int) string {
- if l > len(x) {
- return x
- }
- return x[:l]
-}
-
-// payload represents an RPC request or response payload.
-type payload struct {
- sent bool // whether this is an outgoing payload
- msg any // e.g. a proto.Message
- // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
-}
-
-func (p payload) String() string {
- if p.sent {
- return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
- }
- return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
-}
-
-type fmtStringer struct {
- format string
- a []any
-}
-
-func (f *fmtStringer) String() string {
- return fmt.Sprintf(f.format, f.a...)
-}
-
-type stringer string
-
-func (s stringer) String() string { return string(s) }
diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go
deleted file mode 100644
index 1da3a2308..000000000
--- a/vendor/google.golang.org/grpc/trace_notrace.go
+++ /dev/null
@@ -1,52 +0,0 @@
-//go:build grpcnotrace
-
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in
-// turn enables dead code elimination in binaries using gRPC-Go. This can yield
-// 10-15% improvements in binary size when tracing is not needed.
-
-import (
- "context"
- "fmt"
-)
-
-type notrace struct{}
-
-func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {}
-func (notrace) LazyPrintf(format string, a ...any) {}
-func (notrace) SetError() {}
-func (notrace) SetRecycler(f func(any)) {}
-func (notrace) SetTraceInfo(traceID, spanID uint64) {}
-func (notrace) SetMaxEvents(m int) {}
-func (notrace) Finish() {}
-
-func newTrace(family, title string) traceLog {
- return notrace{}
-}
-
-func newTraceContext(ctx context.Context, tr traceLog) context.Context {
- return ctx
-}
-
-func newTraceEventLog(family, title string) traceEventLog {
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go
deleted file mode 100644
index 88d6e8571..000000000
--- a/vendor/google.golang.org/grpc/trace_withtrace.go
+++ /dev/null
@@ -1,39 +0,0 @@
-//go:build !grpcnotrace
-
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
-
- t "golang.org/x/net/trace"
-)
-
-func newTrace(family, title string) traceLog {
- return t.New(family, title)
-}
-
-func newTraceContext(ctx context.Context, tr traceLog) context.Context {
- return t.NewContext(ctx, tr)
-}
-
-func newTraceEventLog(family, title string) traceEventLog {
- return t.NewEventLog(family, title)
-}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
deleted file mode 100644
index 3c148a814..000000000
--- a/vendor/google.golang.org/grpc/version.go
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-// Version is the current grpc version.
-const Version = "1.71.1"
diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
deleted file mode 100644
index 2ef36bbcf..000000000
--- a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package protodelim marshals and unmarshals varint size-delimited messages.
-package protodelim
-
-import (
- "bufio"
- "encoding/binary"
- "fmt"
- "io"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/proto"
-)
-
-// MarshalOptions is a configurable varint size-delimited marshaler.
-type MarshalOptions struct{ proto.MarshalOptions }
-
-// MarshalTo writes a varint size-delimited wire-format message to w.
-// If w returns an error, MarshalTo returns it unchanged.
-func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
- msgBytes, err := o.MarshalOptions.Marshal(m)
- if err != nil {
- return 0, err
- }
-
- sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
- sizeWritten, err := w.Write(sizeBytes)
- if err != nil {
- return sizeWritten, err
- }
- msgWritten, err := w.Write(msgBytes)
- if err != nil {
- return sizeWritten + msgWritten, err
- }
- return sizeWritten + msgWritten, nil
-}
-
-// MarshalTo writes a varint size-delimited wire-format message to w
-// with the default options.
-//
-// See the documentation for [MarshalOptions.MarshalTo].
-func MarshalTo(w io.Writer, m proto.Message) (int, error) {
- return MarshalOptions{}.MarshalTo(w, m)
-}
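
A sketch of writing several delimited messages into one buffer; durationpb is used only as a convenient concrete message type.

package example

import (
	"bytes"
	"time"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb"
)

// writeAll appends the varint-delimited encoding of each value to buf.
func writeAll(buf *bytes.Buffer, ds ...time.Duration) error {
	for _, d := range ds {
		if _, err := protodelim.MarshalTo(buf, durationpb.New(d)); err != nil {
			return err
		}
	}
	return nil
}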
-
-// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
-type UnmarshalOptions struct {
- proto.UnmarshalOptions
-
- // MaxSize is the maximum size in wire-format bytes of a single message.
- // Unmarshaling a message larger than MaxSize will return an error.
- // A zero MaxSize will default to 4 MiB.
- // Setting MaxSize to -1 disables the limit.
- MaxSize int64
-}
-
-const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size
-
-// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
-// that is larger than its configured [UnmarshalOptions.MaxSize].
-type SizeTooLargeError struct {
- // Size is the varint size of the message encountered
- // that was larger than the provided MaxSize.
- Size uint64
-
- // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
- MaxSize uint64
-}
-
-func (e *SizeTooLargeError) Error() string {
- return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
-}
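
The wrapped error can be recovered with errors.As; a sketch, with the 1 MiB limit chosen only for illustration.

package example

import (
	"bufio"
	"errors"
	"log"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/proto"
)

// readChecked distinguishes oversized messages from other decode failures.
func readChecked(r *bufio.Reader, m proto.Message) error {
	opts := protodelim.UnmarshalOptions{MaxSize: 1 << 20} // 1 MiB, illustrative
	err := opts.UnmarshalFrom(r, m)
	var tooBig *protodelim.SizeTooLargeError
	if errors.As(err, &tooBig) {
		log.Printf("dropping message: %d bytes exceeds limit %d", tooBig.Size, tooBig.MaxSize)
	}
	return err
}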
-
-// Reader is the interface expected by [UnmarshalFrom].
-// It is implemented by *[bufio.Reader].
-type Reader interface {
- io.Reader
- io.ByteReader
-}
-
-// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
-// from r.
-// The provided message must be mutable (e.g., a non-nil pointer to a message).
-//
-// The error is [io.EOF] only if no bytes are read.
-// If an EOF happens after reading some but not all the bytes,
-// UnmarshalFrom returns a non-io.EOF error.
-// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged,
-// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
-func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
- var sizeArr [binary.MaxVarintLen64]byte
- sizeBuf := sizeArr[:0]
- for i := range sizeArr {
- b, err := r.ReadByte()
- if err != nil {
-			// An EOF after some size bytes is handled below as a parse
-			// error; an immediate EOF (no bytes read) is returned unchanged.
- if err == io.EOF && i != 0 {
- break
- }
- return err
- }
- sizeBuf = append(sizeBuf, b)
- if b < 0x80 {
- break
- }
- }
- size, n := protowire.ConsumeVarint(sizeBuf)
- if n < 0 {
- return protowire.ParseError(n)
- }
-
- maxSize := o.MaxSize
- if maxSize == 0 {
- maxSize = defaultMaxSize
- }
- if maxSize != -1 && size > uint64(maxSize) {
- return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
- }
-
- var b []byte
- var err error
- if br, ok := r.(*bufio.Reader); ok {
- // Use the []byte from the bufio.Reader instead of having to allocate one.
- // This reduces CPU usage and allocated bytes.
- b, err = br.Peek(int(size))
- if err == nil {
- defer br.Discard(int(size))
- } else {
- b = nil
- }
- }
- if b == nil {
- b = make([]byte, size)
- _, err = io.ReadFull(r, b)
- }
-
- if err == io.EOF {
- return io.ErrUnexpectedEOF
- }
- if err != nil {
- return err
- }
- if err := o.Unmarshal(b, m); err != nil {
- return err
- }
- return nil
-}
-
-// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
-// from r with the default options.
-// The provided message must be mutable (e.g., a non-nil pointer to a message).
-//
-// See the documentation for [UnmarshalOptions.UnmarshalFrom].
-func UnmarshalFrom(r Reader, m proto.Message) error {
- return UnmarshalOptions{}.UnmarshalFrom(r, m)
-}
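
A sketch of the matching read loop, pairing with the MarshalTo example above: a clean io.EOF before any byte of a new message marks the end of the stream.

package example

import (
	"bufio"
	"io"
	"time"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb"
)

// readAll decodes delimited Duration messages until a clean io.EOF.
func readAll(r io.Reader) ([]time.Duration, error) {
	br := bufio.NewReader(r)
	var out []time.Duration
	for {
		d := &durationpb.Duration{}
		if err := protodelim.UnmarshalFrom(br, d); err == io.EOF {
			return out, nil
		} else if err != nil {
			return out, err
		}
		out = append(out, d.AsDuration())
	}
}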
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
deleted file mode 100644
index 737d6876d..000000000
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ /dev/null
@@ -1,680 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protojson
-
-import (
- "encoding/base64"
- "fmt"
- "math"
- "strconv"
- "strings"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/internal/encoding/json"
- "google.golang.org/protobuf/internal/encoding/messageset"
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/flags"
- "google.golang.org/protobuf/internal/genid"
- "google.golang.org/protobuf/internal/pragma"
- "google.golang.org/protobuf/internal/set"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-// Unmarshal reads the given []byte into the given [proto.Message].
-// The provided message must be mutable (e.g., a non-nil pointer to a message).
-func Unmarshal(b []byte, m proto.Message) error {
- return UnmarshalOptions{}.Unmarshal(b, m)
-}
-
-// UnmarshalOptions is a configurable JSON format parser.
-type UnmarshalOptions struct {
- pragma.NoUnkeyedLiterals
-
-	// If AllowPartial is set, input that would result in messages with missing
-	// required fields does not return an error.
- AllowPartial bool
-
- // If DiscardUnknown is set, unknown fields and enum name values are ignored.
- DiscardUnknown bool
-
- // Resolver is used for looking up types when unmarshaling
- // google.protobuf.Any messages or extension fields.
- // If nil, this defaults to using protoregistry.GlobalTypes.
- Resolver interface {
- protoregistry.MessageTypeResolver
- protoregistry.ExtensionTypeResolver
- }
-
- // RecursionLimit limits how deeply messages may be nested.
- // If zero, a default limit is applied.
- RecursionLimit int
-}
-
-// Unmarshal reads the given []byte and populates the given [proto.Message]
-// using options in the UnmarshalOptions object.
-// It will clear the message before setting the fields.
-// If it returns an error, the given message may be partially set.
-// The provided message must be mutable (e.g., a non-nil pointer to a message).
-func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
- return o.unmarshal(b, m)
-}
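
A sketch of DiscardUnknown in action; google.protobuf.Api is used only as a convenient concrete message type.

package example

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/apipb"
)

func lenientDecode() error {
	m := &apipb.Api{}
	// "bogus" is not a field of google.protobuf.Api. With DiscardUnknown it is
	// silently skipped; with zero-value options it is an "unknown field" error.
	return protojson.UnmarshalOptions{DiscardUnknown: true}.Unmarshal(
		[]byte(`{"name": "demo", "bogus": 1}`), m)
}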
-
-// unmarshal is a centralized function that all unmarshal operations go through.
-// For profiling purposes, avoid changing the name of this function or
-// introducing other code paths for unmarshal that do not go through this.
-func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
- proto.Reset(m)
-
- if o.Resolver == nil {
- o.Resolver = protoregistry.GlobalTypes
- }
- if o.RecursionLimit == 0 {
- o.RecursionLimit = protowire.DefaultRecursionLimit
- }
-
- dec := decoder{json.NewDecoder(b), o}
- if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
- return err
- }
-
- // Check for EOF.
- tok, err := dec.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.EOF {
- return dec.unexpectedTokenError(tok)
- }
-
- if o.AllowPartial {
- return nil
- }
- return proto.CheckInitialized(m)
-}
-
-type decoder struct {
- *json.Decoder
- opts UnmarshalOptions
-}
-
-// newError returns an error object with position info.
-func (d decoder) newError(pos int, f string, x ...any) error {
- line, column := d.Position(pos)
- head := fmt.Sprintf("(line %d:%d): ", line, column)
- return errors.New(head+f, x...)
-}
-
-// unexpectedTokenError returns a syntax error for the given unexpected token.
-func (d decoder) unexpectedTokenError(tok json.Token) error {
- return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString())
-}
-
-// syntaxError returns a syntax error for given position.
-func (d decoder) syntaxError(pos int, f string, x ...any) error {
- line, column := d.Position(pos)
- head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
- return errors.New(head+f, x...)
-}
-
-// unmarshalMessage unmarshals a message into the given protoreflect.Message.
-func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
- d.opts.RecursionLimit--
- if d.opts.RecursionLimit < 0 {
- return errors.New("exceeded max recursion depth")
- }
- if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
- return unmarshal(d, m)
- }
-
- tok, err := d.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.ObjectOpen {
- return d.unexpectedTokenError(tok)
- }
-
- messageDesc := m.Descriptor()
- if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
- return errors.New("no support for proto1 MessageSets")
- }
-
- var seenNums set.Ints
- var seenOneofs set.Ints
- fieldDescs := messageDesc.Fields()
- for {
- // Read field name.
- tok, err := d.Read()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- default:
- return d.unexpectedTokenError(tok)
- case json.ObjectClose:
- return nil
- case json.Name:
- // Continue below.
- }
-
- name := tok.Name()
-		// When unmarshaling a non-custom embedded message in an Any, the JSON
-		// contains an "@type" field that should be skipped, because it is not
-		// a field of the embedded message but simply an artifact of the Any
-		// format.
- if skipTypeURL && name == "@type" {
- d.Read()
- continue
- }
-
- // Get the FieldDescriptor.
- var fd protoreflect.FieldDescriptor
- if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
- // Only extension names are in [name] format.
- extName := protoreflect.FullName(name[1 : len(name)-1])
- extType, err := d.opts.Resolver.FindExtensionByName(extName)
- if err != nil && err != protoregistry.NotFound {
- return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
- }
- if extType != nil {
- fd = extType.TypeDescriptor()
- if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
- return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
- }
- }
- } else {
- // The name can either be the JSON name or the proto field name.
- fd = fieldDescs.ByJSONName(name)
- if fd == nil {
- fd = fieldDescs.ByTextName(name)
- }
- }
-
- if fd == nil {
- // Field is unknown.
- if d.opts.DiscardUnknown {
- if err := d.skipJSONValue(); err != nil {
- return err
- }
- continue
- }
- return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
- }
-
- // Do not allow duplicate fields.
- num := uint64(fd.Number())
- if seenNums.Has(num) {
- return d.newError(tok.Pos(), "duplicate field %v", tok.RawString())
- }
- seenNums.Set(num)
-
- // No need to set values for JSON null unless the field type is
- // google.protobuf.Value or google.protobuf.NullValue.
- if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) {
- d.Read()
- continue
- }
-
- switch {
- case fd.IsList():
- list := m.Mutable(fd).List()
- if err := d.unmarshalList(list, fd); err != nil {
- return err
- }
- case fd.IsMap():
- mmap := m.Mutable(fd).Map()
- if err := d.unmarshalMap(mmap, fd); err != nil {
- return err
- }
- default:
- // If field is a oneof, check if it has already been set.
- if od := fd.ContainingOneof(); od != nil {
- idx := uint64(od.Index())
- if seenOneofs.Has(idx) {
- return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName())
- }
- seenOneofs.Set(idx)
- }
-
- // Required or optional fields.
- if err := d.unmarshalSingular(m, fd); err != nil {
- return err
- }
- }
- }
-}
-
-func isKnownValue(fd protoreflect.FieldDescriptor) bool {
- md := fd.Message()
- return md != nil && md.FullName() == genid.Value_message_fullname
-}
-
-func isNullValue(fd protoreflect.FieldDescriptor) bool {
- ed := fd.Enum()
- return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
-}
-
-// unmarshalSingular unmarshals to the non-repeated field specified
-// by the given FieldDescriptor.
-func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error {
- var val protoreflect.Value
- var err error
- switch fd.Kind() {
- case protoreflect.MessageKind, protoreflect.GroupKind:
- val = m.NewField(fd)
- err = d.unmarshalMessage(val.Message(), false)
- default:
- val, err = d.unmarshalScalar(fd)
- }
-
- if err != nil {
- return err
- }
- if val.IsValid() {
- m.Set(fd, val)
- }
- return nil
-}
-
-// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
-// the given FieldDescriptor.
-func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- const b32 int = 32
- const b64 int = 64
-
- tok, err := d.Read()
- if err != nil {
- return protoreflect.Value{}, err
- }
-
- kind := fd.Kind()
- switch kind {
- case protoreflect.BoolKind:
- if tok.Kind() == json.Bool {
- return protoreflect.ValueOfBool(tok.Bool()), nil
- }
-
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- if v, ok := unmarshalInt(tok, b32); ok {
- return v, nil
- }
-
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- if v, ok := unmarshalInt(tok, b64); ok {
- return v, nil
- }
-
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- if v, ok := unmarshalUint(tok, b32); ok {
- return v, nil
- }
-
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- if v, ok := unmarshalUint(tok, b64); ok {
- return v, nil
- }
-
- case protoreflect.FloatKind:
- if v, ok := unmarshalFloat(tok, b32); ok {
- return v, nil
- }
-
- case protoreflect.DoubleKind:
- if v, ok := unmarshalFloat(tok, b64); ok {
- return v, nil
- }
-
- case protoreflect.StringKind:
- if tok.Kind() == json.String {
- return protoreflect.ValueOfString(tok.ParsedString()), nil
- }
-
- case protoreflect.BytesKind:
- if v, ok := unmarshalBytes(tok); ok {
- return v, nil
- }
-
- case protoreflect.EnumKind:
- if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok {
- return v, nil
- }
-
- default:
- panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
- }
-
- return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
-}
-
-func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
- switch tok.Kind() {
- case json.Number:
- return getInt(tok, bitSize)
-
- case json.String:
- // Decode number from string.
- s := strings.TrimSpace(tok.ParsedString())
- if len(s) != len(tok.ParsedString()) {
- return protoreflect.Value{}, false
- }
- dec := json.NewDecoder([]byte(s))
- tok, err := dec.Read()
- if err != nil {
- return protoreflect.Value{}, false
- }
- return getInt(tok, bitSize)
- }
- return protoreflect.Value{}, false
-}
-
-func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
- n, ok := tok.Int(bitSize)
- if !ok {
- return protoreflect.Value{}, false
- }
- if bitSize == 32 {
- return protoreflect.ValueOfInt32(int32(n)), true
- }
- return protoreflect.ValueOfInt64(n), true
-}
-
-func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
- switch tok.Kind() {
- case json.Number:
- return getUint(tok, bitSize)
-
- case json.String:
- // Decode number from string.
- s := strings.TrimSpace(tok.ParsedString())
- if len(s) != len(tok.ParsedString()) {
- return protoreflect.Value{}, false
- }
- dec := json.NewDecoder([]byte(s))
- tok, err := dec.Read()
- if err != nil {
- return protoreflect.Value{}, false
- }
- return getUint(tok, bitSize)
- }
- return protoreflect.Value{}, false
-}
-
-func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
- n, ok := tok.Uint(bitSize)
- if !ok {
- return protoreflect.Value{}, false
- }
- if bitSize == 32 {
- return protoreflect.ValueOfUint32(uint32(n)), true
- }
- return protoreflect.ValueOfUint64(n), true
-}
-
-func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
- switch tok.Kind() {
- case json.Number:
- return getFloat(tok, bitSize)
-
- case json.String:
- s := tok.ParsedString()
- switch s {
- case "NaN":
- if bitSize == 32 {
- return protoreflect.ValueOfFloat32(float32(math.NaN())), true
- }
- return protoreflect.ValueOfFloat64(math.NaN()), true
- case "Infinity":
- if bitSize == 32 {
- return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true
- }
- return protoreflect.ValueOfFloat64(math.Inf(+1)), true
- case "-Infinity":
- if bitSize == 32 {
- return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true
- }
- return protoreflect.ValueOfFloat64(math.Inf(-1)), true
- }
-
- // Decode number from string.
- if len(s) != len(strings.TrimSpace(s)) {
- return protoreflect.Value{}, false
- }
- dec := json.NewDecoder([]byte(s))
- tok, err := dec.Read()
- if err != nil {
- return protoreflect.Value{}, false
- }
- return getFloat(tok, bitSize)
- }
- return protoreflect.Value{}, false
-}
-
-func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
- n, ok := tok.Float(bitSize)
- if !ok {
- return protoreflect.Value{}, false
- }
- if bitSize == 32 {
- return protoreflect.ValueOfFloat32(float32(n)), true
- }
- return protoreflect.ValueOfFloat64(n), true
-}
-
-func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
- if tok.Kind() != json.String {
- return protoreflect.Value{}, false
- }
-
- s := tok.ParsedString()
- enc := base64.StdEncoding
- if strings.ContainsAny(s, "-_") {
- enc = base64.URLEncoding
- }
- if len(s)%4 != 0 {
- enc = enc.WithPadding(base64.NoPadding)
- }
- b, err := enc.DecodeString(s)
- if err != nil {
- return protoreflect.Value{}, false
- }
- return protoreflect.ValueOfBytes(b), true
-}
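
As the code above implies, bytes fields accept both base64 alphabets, padded or not; a sketch using the BytesValue wrapper, whose JSON form is a bare base64 string.

package example

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// decodeBytes accepts standard and URL-safe base64, padded or not. The first
// two inputs decode to []byte("hello"); the third to the bytes 0xfb 0xff.
func decodeBytes() error {
	for _, in := range []string{
		`"aGVsbG8="`, // standard alphabet, padded
		`"aGVsbG8"`,  // padding omitted
		`"-_8"`,      // URL-safe alphabet, unpadded
	} {
		v := &wrapperspb.BytesValue{}
		if err := protojson.Unmarshal([]byte(in), v); err != nil {
			return err
		}
	}
	return nil
}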
-
-func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) {
- switch tok.Kind() {
- case json.String:
- // Lookup EnumNumber based on name.
- s := tok.ParsedString()
- if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
- return protoreflect.ValueOfEnum(enumVal.Number()), true
- }
- if discardUnknown {
- return protoreflect.Value{}, true
- }
-
- case json.Number:
- if n, ok := tok.Int(32); ok {
- return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true
- }
-
- case json.Null:
- // This is only valid for google.protobuf.NullValue.
- if isNullValue(fd) {
- return protoreflect.ValueOfEnum(0), true
- }
- }
-
- return protoreflect.Value{}, false
-}
-
-func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.ArrayOpen {
- return d.unexpectedTokenError(tok)
- }
-
- switch fd.Kind() {
- case protoreflect.MessageKind, protoreflect.GroupKind:
- for {
- tok, err := d.Peek()
- if err != nil {
- return err
- }
-
- if tok.Kind() == json.ArrayClose {
- d.Read()
- return nil
- }
-
- val := list.NewElement()
- if err := d.unmarshalMessage(val.Message(), false); err != nil {
- return err
- }
- list.Append(val)
- }
- default:
- for {
- tok, err := d.Peek()
- if err != nil {
- return err
- }
-
- if tok.Kind() == json.ArrayClose {
- d.Read()
- return nil
- }
-
- val, err := d.unmarshalScalar(fd)
- if err != nil {
- return err
- }
- if val.IsValid() {
- list.Append(val)
- }
- }
- }
-
- return nil
-}
-
-func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.ObjectOpen {
- return d.unexpectedTokenError(tok)
- }
-
-	// Determine ahead of time whether the map value is a scalar type or a
-	// message type in order to call the appropriate unmarshalMapValue func
-	// inside the for loop below.
- var unmarshalMapValue func() (protoreflect.Value, error)
- switch fd.MapValue().Kind() {
- case protoreflect.MessageKind, protoreflect.GroupKind:
- unmarshalMapValue = func() (protoreflect.Value, error) {
- val := mmap.NewValue()
- if err := d.unmarshalMessage(val.Message(), false); err != nil {
- return protoreflect.Value{}, err
- }
- return val, nil
- }
- default:
- unmarshalMapValue = func() (protoreflect.Value, error) {
- return d.unmarshalScalar(fd.MapValue())
- }
- }
-
-Loop:
- for {
- // Read field name.
- tok, err := d.Read()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- default:
- return d.unexpectedTokenError(tok)
- case json.ObjectClose:
- break Loop
- case json.Name:
- // Continue.
- }
-
- // Unmarshal field name.
- pkey, err := d.unmarshalMapKey(tok, fd.MapKey())
- if err != nil {
- return err
- }
-
- // Check for duplicate field name.
- if mmap.Has(pkey) {
- return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString())
- }
-
- // Read and unmarshal field value.
- pval, err := unmarshalMapValue()
- if err != nil {
- return err
- }
- if pval.IsValid() {
- mmap.Set(pkey, pval)
- }
- }
-
- return nil
-}
-
-// unmarshalMapKey converts the given token of Name kind into a protoreflect.MapKey.
-// A map key type is any boolean, integral, or string type.
-func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) {
- const b32 = 32
- const b64 = 64
- const base10 = 10
-
- name := tok.Name()
- kind := fd.Kind()
- switch kind {
- case protoreflect.StringKind:
- return protoreflect.ValueOfString(name).MapKey(), nil
-
- case protoreflect.BoolKind:
- switch name {
- case "true":
- return protoreflect.ValueOfBool(true).MapKey(), nil
- case "false":
- return protoreflect.ValueOfBool(false).MapKey(), nil
- }
-
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- if n, err := strconv.ParseInt(name, base10, b32); err == nil {
- return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil
- }
-
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- if n, err := strconv.ParseInt(name, base10, b64); err == nil {
- return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil
- }
-
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- if n, err := strconv.ParseUint(name, base10, b32); err == nil {
- return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil
- }
-
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- if n, err := strconv.ParseUint(name, base10, b64); err == nil {
- return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil
- }
-
- default:
- panic(fmt.Sprintf("invalid kind for map key: %v", kind))
- }
-
- return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
-}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
deleted file mode 100644
index ae71007c1..000000000
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package protojson marshals and unmarshals protocol buffer messages as JSON
-// format. It follows the guide at
-// https://protobuf.dev/programming-guides/proto3#json.
-//
-// This package produces a different output than the standard [encoding/json]
-// package, which does not operate correctly on protocol buffer messages.
-package protojson
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
deleted file mode 100644
index 0e72d8537..000000000
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protojson
-
-import (
- "encoding/base64"
- "fmt"
-
- "google.golang.org/protobuf/internal/encoding/json"
- "google.golang.org/protobuf/internal/encoding/messageset"
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/filedesc"
- "google.golang.org/protobuf/internal/flags"
- "google.golang.org/protobuf/internal/genid"
- "google.golang.org/protobuf/internal/order"
- "google.golang.org/protobuf/internal/pragma"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const defaultIndent = " "
-
-// Format formats the message as a multiline string.
-// This function is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. Its output will change across
-// different builds of your program, even when using the same version of the
-// protobuf module.
-func Format(m proto.Message) string {
- return MarshalOptions{Multiline: true}.Format(m)
-}
-
-// Marshal writes the given [proto.Message] in JSON format using default options.
-// Do not depend on the output being stable. Its output will change across
-// different builds of your program, even when using the same version of the
-// protobuf module.
-func Marshal(m proto.Message) ([]byte, error) {
- return MarshalOptions{}.Marshal(m)
-}
-
-// MarshalOptions is a configurable JSON format marshaler.
-type MarshalOptions struct {
- pragma.NoUnkeyedLiterals
-
-	// Multiline specifies whether the marshaler should format the output in
-	// indented form with every textual element on a new line.
-	// If Indent is an empty string, then an arbitrary indent is chosen.
- Multiline bool
-
- // Indent specifies the set of indentation characters to use in a multiline
- // formatted output such that every entry is preceded by Indent and
- // terminated by a newline. If non-empty, then Multiline is treated as true.
- // Indent can only be composed of space or tab characters.
- Indent string
-
- // AllowPartial allows messages that have missing required fields to marshal
- // without returning an error. If AllowPartial is false (the default),
-	// Marshal will return an error if there are any missing required fields.
- AllowPartial bool
-
- // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
- // field names.
- UseProtoNames bool
-
- // UseEnumNumbers emits enum values as numbers.
- UseEnumNumbers bool
-
- // EmitUnpopulated specifies whether to emit unpopulated fields. It does not
- // emit unpopulated oneof fields or unpopulated extension fields.
-	// The JSON values emitted for unpopulated fields are as follows:
- // ╔═══════╤════════════════════════════╗
- // ║ JSON │ Protobuf field ║
- // ╠═══════╪════════════════════════════╣
- // ║ false │ proto3 boolean fields ║
- // ║ 0 │ proto3 numeric fields ║
- // ║ "" │ proto3 string/bytes fields ║
- // ║ null │ proto2 scalar fields ║
- // ║ null │ message fields ║
- // ║ [] │ list fields ║
- // ║ {} │ map fields ║
- // ╚═══════╧════════════════════════════╝
- EmitUnpopulated bool
-
- // EmitDefaultValues specifies whether to emit default-valued primitive fields,
- // empty lists, and empty maps. The fields affected are as follows:
- // ╔═══════╤════════════════════════════════════════╗
- // ║ JSON │ Protobuf field ║
- // ╠═══════╪════════════════════════════════════════╣
- // ║ false │ non-optional scalar boolean fields ║
- // ║ 0 │ non-optional scalar numeric fields ║
- // ║ "" │ non-optional scalar string/byte fields ║
- // ║ [] │ empty repeated fields ║
- // ║ {} │ empty map fields ║
- // ╚═══════╧════════════════════════════════════════╝
- //
- // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
- // i.e. presence-sensing fields that are omitted will remain omitted to preserve
- // presence-sensing.
- // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
- // a strict superset of the latter.
- EmitDefaultValues bool
-
- // Resolver is used for looking up types when expanding google.protobuf.Any
- // messages. If nil, this defaults to using protoregistry.GlobalTypes.
- Resolver interface {
- protoregistry.ExtensionTypeResolver
- protoregistry.MessageTypeResolver
- }
-}
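
A sketch combining two of these options; the message and field values are illustrative.

package example

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/apipb"
)

func printVerbose() error {
	out, err := protojson.MarshalOptions{
		Multiline:       true, // one element per line, default indent
		EmitUnpopulated: true, // include zero values, e.g. "methods": []
	}.Marshal(&apipb.Api{Name: "demo"})
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}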
-
-// Format formats the message as a string.
-// This method is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. Its output will change across
-// different builds of your program, even when using the same version of the
-// protobuf module.
-func (o MarshalOptions) Format(m proto.Message) string {
- if m == nil || !m.ProtoReflect().IsValid() {
- return "" // invalid syntax, but okay since this is for debugging
- }
- o.AllowPartial = true
- b, _ := o.Marshal(m)
- return string(b)
-}
-
-// Marshal marshals the given [proto.Message] in the JSON format using options
-// in MarshalOptions. Do not depend on the output being stable. Its output will
-// change across different builds of your program, even when using the same
-// version of the protobuf module.
-func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
- return o.marshal(nil, m)
-}
-
-// MarshalAppend appends the JSON format encoding of m to b,
-// returning the result.
-func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
- return o.marshal(b, m)
-}
-
-// marshal is a centralized function that all marshal operations go through.
-// For profiling purposes, avoid changing the name of this function or
-// introducing other code paths for marshal that do not go through this.
-func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
- if o.Multiline && o.Indent == "" {
- o.Indent = defaultIndent
- }
- if o.Resolver == nil {
- o.Resolver = protoregistry.GlobalTypes
- }
-
- internalEnc, err := json.NewEncoder(b, o.Indent)
- if err != nil {
- return nil, err
- }
-
- // Treat nil message interface as an empty message,
- // in which case the output is an empty JSON object.
- if m == nil {
- return append(b, '{', '}'), nil
- }
-
- enc := encoder{internalEnc, o}
- if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
- return nil, err
- }
- if o.AllowPartial {
- return enc.Bytes(), nil
- }
- return enc.Bytes(), proto.CheckInitialized(m)
-}
-
-type encoder struct {
- *json.Encoder
- opts MarshalOptions
-}
-
-// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
-var typeFieldDesc = func() protoreflect.FieldDescriptor {
- var fd filedesc.Field
- fd.L0.FullName = "@type"
- fd.L0.Index = -1
- fd.L1.Cardinality = protoreflect.Optional
- fd.L1.Kind = protoreflect.StringKind
- return &fd
-}()
-
-// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
-// to additionally iterate over a synthetic field for the type URL.
-type typeURLFieldRanger struct {
- order.FieldRanger
- typeURL string
-}
-
-func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
- if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
- return
- }
- m.FieldRanger.Range(f)
-}
-
-// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
-// method to additionally iterate over unpopulated fields.
-type unpopulatedFieldRanger struct {
- protoreflect.Message
-
- skipNull bool
-}
-
-func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
- fds := m.Descriptor().Fields()
- for i := 0; i < fds.Len(); i++ {
- fd := fds.Get(i)
- if m.Has(fd) || fd.ContainingOneof() != nil {
- continue // ignore populated fields and fields within a oneof
- }
-
- v := m.Get(fd)
- if fd.HasPresence() {
- if m.skipNull {
- continue
- }
- v = protoreflect.Value{} // use invalid value to emit null
- }
- if !f(fd, v) {
- return
- }
- }
- m.Message.Range(f)
-}
-
-// marshalMessage marshals the fields in the given protoreflect.Message.
-// If the typeURL is non-empty, then a synthetic "@type" field is injected
-// containing the URL as the value.
-func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
- if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
- return errors.New("no support for proto1 MessageSets")
- }
-
- if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
- return marshal(e, m)
- }
-
- e.StartObject()
- defer e.EndObject()
-
- var fields order.FieldRanger = m
- switch {
- case e.opts.EmitUnpopulated:
- fields = unpopulatedFieldRanger{Message: m, skipNull: false}
- case e.opts.EmitDefaultValues:
- fields = unpopulatedFieldRanger{Message: m, skipNull: true}
- }
- if typeURL != "" {
- fields = typeURLFieldRanger{fields, typeURL}
- }
-
- var err error
- order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- name := fd.JSONName()
- if e.opts.UseProtoNames {
- name = fd.TextName()
- }
-
- if err = e.WriteName(name); err != nil {
- return false
- }
- if err = e.marshalValue(v, fd); err != nil {
- return false
- }
- return true
- })
- return err
-}
-
-// marshalValue marshals the given protoreflect.Value.
-func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- switch {
- case fd.IsList():
- return e.marshalList(val.List(), fd)
- case fd.IsMap():
- return e.marshalMap(val.Map(), fd)
- default:
- return e.marshalSingular(val, fd)
- }
-}
-
-// marshalSingular marshals the given non-repeated field value. This includes
-// all scalar types, enums, messages, and groups.
-func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- if !val.IsValid() {
- e.WriteNull()
- return nil
- }
-
- switch kind := fd.Kind(); kind {
- case protoreflect.BoolKind:
- e.WriteBool(val.Bool())
-
- case protoreflect.StringKind:
- if e.WriteString(val.String()) != nil {
- return errors.InvalidUTF8(string(fd.FullName()))
- }
-
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- e.WriteInt(val.Int())
-
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- e.WriteUint(val.Uint())
-
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
- protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
- // 64-bit integers are written out as a JSON string.
- e.WriteString(val.String())
-
- case protoreflect.FloatKind:
- // Encoder.WriteFloat handles the special numbers NaN and infinities.
- e.WriteFloat(val.Float(), 32)
-
- case protoreflect.DoubleKind:
- // Encoder.WriteFloat handles the special numbers NaN and infinities.
- e.WriteFloat(val.Float(), 64)
-
- case protoreflect.BytesKind:
- e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
-
- case protoreflect.EnumKind:
- if fd.Enum().FullName() == genid.NullValue_enum_fullname {
- e.WriteNull()
- } else {
- desc := fd.Enum().Values().ByNumber(val.Enum())
- if e.opts.UseEnumNumbers || desc == nil {
- e.WriteInt(int64(val.Enum()))
- } else {
- e.WriteString(string(desc.Name()))
- }
- }
-
- case protoreflect.MessageKind, protoreflect.GroupKind:
- if err := e.marshalMessage(val.Message(), ""); err != nil {
- return err
- }
-
- default:
- panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
- }
- return nil
-}
-
-// marshalList marshals the given protoreflect.List.
-func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
- e.StartArray()
- defer e.EndArray()
-
- for i := 0; i < list.Len(); i++ {
- item := list.Get(i)
- if err := e.marshalSingular(item, fd); err != nil {
- return err
- }
- }
- return nil
-}
-
-// marshalMap marshals given protoreflect.Map.
-func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
- e.StartObject()
- defer e.EndObject()
-
- var err error
- order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
- if err = e.WriteName(k.String()); err != nil {
- return false
- }
- if err = e.marshalSingular(v, fd.MapValue()); err != nil {
- return false
- }
- return true
- })
- return err
-}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
deleted file mode 100644
index e9fe10394..000000000
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ /dev/null
@@ -1,880 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protojson
-
-import (
- "bytes"
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-
- "google.golang.org/protobuf/internal/encoding/json"
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/genid"
- "google.golang.org/protobuf/internal/strs"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type marshalFunc func(encoder, protoreflect.Message) error
-
-// wellKnownTypeMarshaler returns a marshal function if the message type
-// has specialized serialization behavior. It returns nil otherwise.
-func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
- if name.Parent() == genid.GoogleProtobuf_package {
- switch name.Name() {
- case genid.Any_message_name:
- return encoder.marshalAny
- case genid.Timestamp_message_name:
- return encoder.marshalTimestamp
- case genid.Duration_message_name:
- return encoder.marshalDuration
- case genid.BoolValue_message_name,
- genid.Int32Value_message_name,
- genid.Int64Value_message_name,
- genid.UInt32Value_message_name,
- genid.UInt64Value_message_name,
- genid.FloatValue_message_name,
- genid.DoubleValue_message_name,
- genid.StringValue_message_name,
- genid.BytesValue_message_name:
- return encoder.marshalWrapperType
- case genid.Struct_message_name:
- return encoder.marshalStruct
- case genid.ListValue_message_name:
- return encoder.marshalListValue
- case genid.Value_message_name:
- return encoder.marshalKnownValue
- case genid.FieldMask_message_name:
- return encoder.marshalFieldMask
- case genid.Empty_message_name:
- return encoder.marshalEmpty
- }
- }
- return nil
-}
-
-type unmarshalFunc func(decoder, protoreflect.Message) error
-
-// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
-// has specialized serialization behavior. It returns nil otherwise.
-func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
- if name.Parent() == genid.GoogleProtobuf_package {
- switch name.Name() {
- case genid.Any_message_name:
- return decoder.unmarshalAny
- case genid.Timestamp_message_name:
- return decoder.unmarshalTimestamp
- case genid.Duration_message_name:
- return decoder.unmarshalDuration
- case genid.BoolValue_message_name,
- genid.Int32Value_message_name,
- genid.Int64Value_message_name,
- genid.UInt32Value_message_name,
- genid.UInt64Value_message_name,
- genid.FloatValue_message_name,
- genid.DoubleValue_message_name,
- genid.StringValue_message_name,
- genid.BytesValue_message_name:
- return decoder.unmarshalWrapperType
- case genid.Struct_message_name:
- return decoder.unmarshalStruct
- case genid.ListValue_message_name:
- return decoder.unmarshalListValue
- case genid.Value_message_name:
- return decoder.unmarshalKnownValue
- case genid.FieldMask_message_name:
- return decoder.unmarshalFieldMask
- case genid.Empty_message_name:
- return decoder.unmarshalEmpty
- }
- }
- return nil
-}
-
-// The JSON representation of an Any message uses the regular representation of
-// the deserialized, embedded message, with an additional field `@type` which
-// contains the type URL. If the embedded message type is well-known and has a
-// custom JSON representation, that representation will be embedded by adding a
-// field `value` which holds the custom JSON, in addition to the `@type` field.
-
-func (e encoder) marshalAny(m protoreflect.Message) error {
- fds := m.Descriptor().Fields()
- fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
- fdValue := fds.ByNumber(genid.Any_Value_field_number)
-
- if !m.Has(fdType) {
- if !m.Has(fdValue) {
- // If message is empty, marshal out empty JSON object.
- e.StartObject()
- e.EndObject()
- return nil
- } else {
- // Return error if type_url field is not set, but value is set.
- return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
- }
- }
-
- typeVal := m.Get(fdType)
- valueVal := m.Get(fdValue)
-
- // Resolve the type in order to unmarshal value field.
- typeURL := typeVal.String()
- emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
- if err != nil {
- return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
- }
-
- em := emt.New()
- err = proto.UnmarshalOptions{
- AllowPartial: true, // never check required fields inside an Any
- Resolver: e.opts.Resolver,
- }.Unmarshal(valueVal.Bytes(), em.Interface())
- if err != nil {
- return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
- }
-
- // If type of value has custom JSON encoding, marshal out a field "value"
- // with corresponding custom JSON encoding of the embedded message as a
- // field.
- if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
- e.StartObject()
- defer e.EndObject()
-
- // Marshal out @type field.
- e.WriteName("@type")
- if err := e.WriteString(typeURL); err != nil {
- return err
- }
-
- e.WriteName("value")
- return marshal(e, em)
- }
-
- // Else, marshal out the embedded message's fields in this Any object.
- if err := e.marshalMessage(em, typeURL); err != nil {
- return err
- }
-
- return nil
-}
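
Both branches above are observable from the public API. A sketch of the well-known-type branch (the expected output reproduces the rules described above):

    package main

    import (
    	"fmt"
    	"time"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/types/known/anypb"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	a, err := anypb.New(durationpb.New(90 * time.Second))
    	if err != nil {
    		panic(err)
    	}
    	b, err := protojson.Marshal(a)
    	if err != nil {
    		panic(err)
    	}
    	// Duration has a custom JSON form, so it is wrapped in a "value" field:
    	// {"@type":"type.googleapis.com/google.protobuf.Duration","value":"90s"}
    	fmt.Println(string(b))
    }
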
-
-func (d decoder) unmarshalAny(m protoreflect.Message) error {
- // Peek to check for json.ObjectOpen to avoid advancing a read.
- start, err := d.Peek()
- if err != nil {
- return err
- }
- if start.Kind() != json.ObjectOpen {
- return d.unexpectedTokenError(start)
- }
-
- // Use another decoder to parse the unread bytes for @type field. This
- // avoids advancing a read from current decoder because the current JSON
- // object may contain the fields of the embedded type.
- dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}}
- tok, err := findTypeURL(dec)
- switch err {
- case errEmptyObject:
- // An empty JSON object translates to an empty Any message.
- d.Read() // Read json.ObjectOpen.
- d.Read() // Read json.ObjectClose.
- return nil
-
- case errMissingType:
- if d.opts.DiscardUnknown {
- // Treat all fields as unknowns, similar to an empty object.
- return d.skipJSONValue()
- }
- // Use start.Pos() for line position.
- return d.newError(start.Pos(), err.Error())
-
- default:
- if err != nil {
- return err
- }
- }
-
- typeURL := tok.ParsedString()
- emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
- if err != nil {
- return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
- }
-
- // Create new message for the embedded message type and unmarshal into it.
- em := emt.New()
- if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
- // If embedded message is a custom type,
- // unmarshal the JSON "value" field into it.
- if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
- return err
- }
- } else {
- // Else unmarshal the current JSON object into it.
- if err := d.unmarshalMessage(em, true); err != nil {
- return err
- }
- }
- // Serialize the embedded message and assign the resulting bytes to the
- // proto value field.
- b, err := proto.MarshalOptions{
- AllowPartial: true, // No need to check required fields inside an Any.
- Deterministic: true,
- }.Marshal(em.Interface())
- if err != nil {
- return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
- }
-
- fds := m.Descriptor().Fields()
- fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
- fdValue := fds.ByNumber(genid.Any_Value_field_number)
-
- m.Set(fdType, protoreflect.ValueOfString(typeURL))
- m.Set(fdValue, protoreflect.ValueOfBytes(b))
- return nil
-}
-
-var errEmptyObject = fmt.Errorf(`empty object`)
-var errMissingType = fmt.Errorf(`missing "@type" field`)
-
-// findTypeURL returns the token for the "@type" field value from the given
-// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
-// It returns errEmptyObject if the JSON object is empty or errMissingType if
-// @type field does not exist. It returns any other error if the @type field is
-// not valid or there are other decoding issues.
-func findTypeURL(d decoder) (json.Token, error) {
- var typeURL string
- var typeTok json.Token
- numFields := 0
- // Skip start object.
- d.Read()
-
-Loop:
- for {
- tok, err := d.Read()
- if err != nil {
- return json.Token{}, err
- }
-
- switch tok.Kind() {
- case json.ObjectClose:
- if typeURL == "" {
- // Did not find @type field.
- if numFields > 0 {
- return json.Token{}, errMissingType
- }
- return json.Token{}, errEmptyObject
- }
- break Loop
-
- case json.Name:
- numFields++
- if tok.Name() != "@type" {
- // Skip value.
- if err := d.skipJSONValue(); err != nil {
- return json.Token{}, err
- }
- continue
- }
-
- // Return error if this was previously set already.
- if typeURL != "" {
- return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
- }
- // Read field value.
- tok, err := d.Read()
- if err != nil {
- return json.Token{}, err
- }
- if tok.Kind() != json.String {
- return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
- }
- typeURL = tok.ParsedString()
- if typeURL == "" {
- return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
- }
- typeTok = tok
- }
- }
-
- return typeTok, nil
-}
-
-// skipJSONValue parses a JSON value (null, boolean, string, number, object and
-// array) in order to advance the read to the next JSON value. It relies on
-// the decoder returning an error if the types are not in valid sequence.
-func (d decoder) skipJSONValue() error {
- var open int
- for {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- case json.ObjectClose, json.ArrayClose:
- open--
- case json.ObjectOpen, json.ArrayOpen:
- open++
- if open > d.opts.RecursionLimit {
- return errors.New("exceeded max recursion depth")
- }
- case json.EOF:
- // This can only happen if there's a bug in Decoder.Read.
- // Avoid an infinite loop if this does happen.
- return errors.New("unexpected EOF")
- }
- if open == 0 {
- return nil
- }
- }
-}
-
-// unmarshalAnyValue unmarshals the given custom-type message from the JSON
-// object's "value" field.
-func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
- // Skip ObjectOpen, and start reading the fields.
- d.Read()
-
- var found bool // Used for detecting duplicate "value".
- for {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- case json.ObjectClose:
- if !found {
- // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
- // for compatibility with other proto runtimes that have interpreted the spec differently.
- if m.Descriptor().FullName() != genid.Empty_message_fullname {
- return d.newError(tok.Pos(), `missing "value" field`)
- }
- }
- return nil
-
- case json.Name:
- switch tok.Name() {
- case "@type":
- // Skip the value as this was previously parsed already.
- d.Read()
-
- case "value":
- if found {
- return d.newError(tok.Pos(), `duplicate "value" field`)
- }
- // Unmarshal the field value into the given message.
- if err := unmarshal(d, m); err != nil {
- return err
- }
- found = true
-
- default:
- if d.opts.DiscardUnknown {
- if err := d.skipJSONValue(); err != nil {
- return err
- }
- continue
- }
- return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
- }
- }
- }
-}
-
-// Wrapper types are encoded as JSON primitives like string, number or boolean.
-
-func (e encoder) marshalWrapperType(m protoreflect.Message) error {
- fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
- val := m.Get(fd)
- return e.marshalSingular(val, fd)
-}
-
-func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
- fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
- val, err := d.unmarshalScalar(fd)
- if err != nil {
- return err
- }
- m.Set(fd, val)
- return nil
-}
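
As the comment above says, wrapper messages collapse to bare JSON primitives. A quick sketch through the public API:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
    	for _, m := range []proto.Message{
    		wrapperspb.Double(3.5), // -> 3.5
    		wrapperspb.Int64(42),   // -> "42" (64-bit integers marshal as JSON strings)
    		wrapperspb.Bool(true),  // -> true
    	} {
    		b, err := protojson.Marshal(m)
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(string(b))
    	}
    }
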
-
-// The JSON representation for Empty is an empty JSON object.
-
-func (e encoder) marshalEmpty(protoreflect.Message) error {
- e.StartObject()
- e.EndObject()
- return nil
-}
-
-func (d decoder) unmarshalEmpty(protoreflect.Message) error {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.ObjectOpen {
- return d.unexpectedTokenError(tok)
- }
-
- for {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- case json.ObjectClose:
- return nil
-
- case json.Name:
- if d.opts.DiscardUnknown {
- if err := d.skipJSONValue(); err != nil {
- return err
- }
- continue
- }
- return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
-
- default:
- return d.unexpectedTokenError(tok)
- }
- }
-}
-
-// The JSON representation for Struct is a JSON object that contains the encoded
-// Struct.fields map and follows the serialization rules for a map.
-
-func (e encoder) marshalStruct(m protoreflect.Message) error {
- fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
- return e.marshalMap(m.Get(fd).Map(), fd)
-}
-
-func (d decoder) unmarshalStruct(m protoreflect.Message) error {
- fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
- return d.unmarshalMap(m.Mutable(fd).Map(), fd)
-}
-
-// The JSON representation for ListValue is JSON array that contains the encoded
-// ListValue.values repeated field and follows the serialization rules for a
-// repeated field.
-
-func (e encoder) marshalListValue(m protoreflect.Message) error {
- fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
- return e.marshalList(m.Get(fd).List(), fd)
-}
-
-func (d decoder) unmarshalListValue(m protoreflect.Message) error {
- fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
- return d.unmarshalList(m.Mutable(fd).List(), fd)
-}
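
Struct and ListValue therefore round-trip as plain JSON objects and arrays. A sketch:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
    	s, err := structpb.NewStruct(map[string]interface{}{
    		"n":    1.5,
    		"tags": []interface{}{"a", "b"},
    	})
    	if err != nil {
    		panic(err)
    	}
    	b, err := protojson.Marshal(s)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(b)) // {"n":1.5,"tags":["a","b"]}

    	var back structpb.Struct
    	if err := protojson.Unmarshal(b, &back); err != nil {
    		panic(err)
    	}
    	fmt.Println(back.Fields["n"].GetNumberValue()) // 1.5
    }
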
-
-// The JSON representation for a Value is dependent on the oneof field that is
-// set. Each of the fields in the oneof has its own custom serialization rule. A
-// Value message needs to have one of the oneof fields set, else it is an error.
-
-func (e encoder) marshalKnownValue(m protoreflect.Message) error {
- od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
- fd := m.WhichOneof(od)
- if fd == nil {
- return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
- }
- if fd.Number() == genid.Value_NumberValue_field_number {
- if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
- return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
- }
- }
- return e.marshalSingular(m.Get(fd), fd)
-}
-
-func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
- tok, err := d.Peek()
- if err != nil {
- return err
- }
-
- var fd protoreflect.FieldDescriptor
- var val protoreflect.Value
- switch tok.Kind() {
- case json.Null:
- d.Read()
- fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
- val = protoreflect.ValueOfEnum(0)
-
- case json.Bool:
- tok, err := d.Read()
- if err != nil {
- return err
- }
- fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
- val = protoreflect.ValueOfBool(tok.Bool())
-
- case json.Number:
- tok, err := d.Read()
- if err != nil {
- return err
- }
- fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
- var ok bool
- val, ok = unmarshalFloat(tok, 64)
- if !ok {
- return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
- }
-
- case json.String:
- // A JSON string may have been encoded from the number_value field,
- // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
- // for it to be in JSON string form. Given this custom encoding spec,
- // however, there is no way to identify that and hence a JSON string is
- // always assigned to the string_value field, which means that certain
-// encodings cannot be parsed back to the same field.
- tok, err := d.Read()
- if err != nil {
- return err
- }
- fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
- val = protoreflect.ValueOfString(tok.ParsedString())
-
- case json.ObjectOpen:
- fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
- val = m.NewField(fd)
- if err := d.unmarshalStruct(val.Message()); err != nil {
- return err
- }
-
- case json.ArrayOpen:
- fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
- val = m.NewField(fd)
- if err := d.unmarshalListValue(val.Message()); err != nil {
- return err
- }
-
- default:
- return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
- }
-
- m.Set(fd, val)
- return nil
-}
-
-// The JSON representation for a Duration is a JSON string that ends in the
-// suffix "s" (indicating seconds) and is preceded by the number of seconds,
-// with nanoseconds expressed as fractional seconds.
-//
-// Durations less than one second are represented with a 0 seconds field and a
-// positive or negative nanos field. For durations of one second or more, a
-// non-zero value for the nanos field must be of the same sign as the seconds
-// field.
-//
-// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
-// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.
-
-const (
- secondsInNanos = 999999999
- maxSecondsInDuration = 315576000000
-)
-
-func (e encoder) marshalDuration(m protoreflect.Message) error {
- fds := m.Descriptor().Fields()
- fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
- fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
-
- secsVal := m.Get(fdSeconds)
- nanosVal := m.Get(fdNanos)
- secs := secsVal.Int()
- nanos := nanosVal.Int()
- if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
- return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs)
- }
- if nanos < -secondsInNanos || nanos > secondsInNanos {
- return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos)
- }
- if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
- return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname)
- }
- // Generated output always contains 0, 3, 6, or 9 fractional digits,
- // depending on required precision, followed by the suffix "s".
- var sign string
- if secs < 0 || nanos < 0 {
- sign, secs, nanos = "-", -1*secs, -1*nanos
- }
- x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- e.WriteString(x + "s")
- return nil
-}
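
The 0/3/6/9 fractional-digit rule above is easy to observe through the public API. A sketch:

    package main

    import (
    	"fmt"
    	"time"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	for _, d := range []time.Duration{
    		3 * time.Second,         // "3s"
    		1500 * time.Millisecond, // "1.500s" (digits trimmed in groups of three)
    		time.Microsecond,        // "0.000001s"
    		time.Nanosecond,         // "0.000000001s"
    	} {
    		b, err := protojson.Marshal(durationpb.New(d))
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(string(b))
    	}
    }
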
-
-func (d decoder) unmarshalDuration(m protoreflect.Message) error {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.String {
- return d.unexpectedTokenError(tok)
- }
-
- secs, nanos, ok := parseDuration(tok.ParsedString())
- if !ok {
- return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString())
- }
- // Validate seconds. No need to validate nanos because parseDuration would
- // have covered that already.
- if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
- return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString())
- }
-
- fds := m.Descriptor().Fields()
- fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
- fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
-
- m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
- m.Set(fdNanos, protoreflect.ValueOfInt32(nanos))
- return nil
-}
-
-// parseDuration parses the given input string for the seconds and nanoseconds
-// values of the Duration JSON format. The format is a decimal number with a
-// suffix 's'. It can have an optional plus/minus sign. There needs to be at least an
-// integer or fractional part. Fractional part is limited to 9 digits only for
-// nanoseconds precision, regardless of whether there are trailing zero digits.
-// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s.
-func parseDuration(input string) (int64, int32, bool) {
- b := []byte(input)
- size := len(b)
- if size < 2 {
- return 0, 0, false
- }
- if b[size-1] != 's' {
- return 0, 0, false
- }
- b = b[:size-1]
-
- // Read optional plus/minus symbol.
- var neg bool
- switch b[0] {
- case '-':
- neg = true
- b = b[1:]
- case '+':
- b = b[1:]
- }
- if len(b) == 0 {
- return 0, 0, false
- }
-
- // Read the integer part.
- var intp []byte
- switch {
- case b[0] == '0':
- b = b[1:]
-
- case '1' <= b[0] && b[0] <= '9':
- intp = b[0:]
- b = b[1:]
- n := 1
- for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
- n++
- b = b[1:]
- }
- intp = intp[:n]
-
- case b[0] == '.':
- // Continue below.
-
- default:
- return 0, 0, false
- }
-
- hasFrac := false
- var frac [9]byte
- if len(b) > 0 {
- if b[0] != '.' {
- return 0, 0, false
- }
- // Read the fractional part.
- b = b[1:]
- n := 0
- for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' {
- frac[n] = b[0]
- n++
- b = b[1:]
- }
- // It is not valid if there are more bytes left.
- if len(b) > 0 {
- return 0, 0, false
- }
- // Pad fractional part with 0s.
- for i := n; i < 9; i++ {
- frac[i] = '0'
- }
- hasFrac = true
- }
-
- var secs int64
- if len(intp) > 0 {
- var err error
- secs, err = strconv.ParseInt(string(intp), 10, 64)
- if err != nil {
- return 0, 0, false
- }
- }
-
- var nanos int64
- if hasFrac {
- nanob := bytes.TrimLeft(frac[:], "0")
- if len(nanob) > 0 {
- var err error
- nanos, err = strconv.ParseInt(string(nanob), 10, 32)
- if err != nil {
- return 0, 0, false
- }
- }
- }
-
- if neg {
- if secs > 0 {
- secs = -secs
- }
- if nanos > 0 {
- nanos = -nanos
- }
- }
- return secs, int32(nanos), true
-}
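
Seen from the public API, these are the shapes protojson.Unmarshal accepts for a Duration. A sketch:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	var d durationpb.Duration
    	if err := protojson.Unmarshal([]byte(`"-1.5s"`), &d); err != nil {
    		panic(err)
    	}
    	fmt.Println(d.Seconds, d.Nanos) // -1 -500000000

    	// A fractional part longer than nine digits is rejected.
    	err := protojson.Unmarshal([]byte(`"1.0000000001s"`), &d)
    	fmt.Println(err != nil) // true
    }
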
-
-// The JSON representation for a Timestamp is a JSON string in the RFC 3339
-// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
-// {year} is always expressed using four digits while {month}, {day}, {hour},
-// {min}, and {sec} are zero-padded to two digits each. The fractional seconds,
-// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The
-// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding
-// should always use UTC (as indicated by "Z") and a decoder should be able to
-// accept both UTC and other timezones (as indicated by an offset).
-//
-// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
-// inclusive.
-// Timestamp.nanos must be from 0 to 999,999,999 inclusive.
-
-const (
- maxTimestampSeconds = 253402300799
- minTimestampSeconds = -62135596800
-)
-
-func (e encoder) marshalTimestamp(m protoreflect.Message) error {
- fds := m.Descriptor().Fields()
- fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
- fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
-
- secsVal := m.Get(fdSeconds)
- nanosVal := m.Get(fdNanos)
- secs := secsVal.Int()
- nanos := nanosVal.Int()
- if secs < minTimestampSeconds || secs > maxTimestampSeconds {
- return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
- }
- if nanos < 0 || nanos > secondsInNanos {
- return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
- }
- // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
- // 6 or 9 fractional digits.
- t := time.Unix(secs, nanos).UTC()
- x := t.Format("2006-01-02T15:04:05.000000000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- e.WriteString(x + "Z")
- return nil
-}
-
-func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.String {
- return d.unexpectedTokenError(tok)
- }
-
- s := tok.ParsedString()
- t, err := time.Parse(time.RFC3339Nano, s)
- if err != nil {
- return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
- }
- // Validate seconds.
- secs := t.Unix()
- if secs < minTimestampSeconds || secs > maxTimestampSeconds {
- return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
- }
- // Validate subseconds.
- i := strings.LastIndexByte(s, '.') // start of subsecond field
- j := strings.LastIndexAny(s, "Z-+") // start of timezone field
- if i >= 0 && j >= i && j-i > len(".999999999") {
- return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
- }
-
- fds := m.Descriptor().Fields()
- fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
- fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
-
- m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
- m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
- return nil
-}
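
Z-normalization on encode and offset tolerance on decode are both visible from the public API. A sketch:

    package main

    import (
    	"fmt"
    	"time"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
    	t := time.Date(2024, time.March, 1, 12, 30, 0, 500000000, time.UTC)
    	b, err := protojson.Marshal(timestamppb.New(t))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(b)) // "2024-03-01T12:30:00.500Z"

    	// Decoding accepts offsets, but the stored value is normalized to
    	// seconds/nanos since the Unix epoch.
    	var ts timestamppb.Timestamp
    	if err := protojson.Unmarshal([]byte(`"2024-03-01T13:30:00.500+01:00"`), &ts); err != nil {
    		panic(err)
    	}
    	fmt.Println(ts.AsTime().Equal(t)) // true
    }
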
-
-// The JSON representation for a FieldMask is a JSON string where paths are
-// separated by a comma. Field names in each path are converted to/from
-// lower-camel naming conventions. Encoding should fail if the path name would
-// end up differently after a round-trip.
-
-func (e encoder) marshalFieldMask(m protoreflect.Message) error {
- fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
- list := m.Get(fd).List()
- paths := make([]string, 0, list.Len())
-
- for i := 0; i < list.Len(); i++ {
- s := list.Get(i).String()
- if !protoreflect.FullName(s).IsValid() {
- return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
- }
- // Return error if conversion to camelCase is not reversible.
- cc := strs.JSONCamelCase(s)
- if s != strs.JSONSnakeCase(cc) {
- return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
- }
- paths = append(paths, cc)
- }
-
- e.WriteString(strings.Join(paths, ","))
- return nil
-}
-
-func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- if tok.Kind() != json.String {
- return d.unexpectedTokenError(tok)
- }
- str := strings.TrimSpace(tok.ParsedString())
- if str == "" {
- return nil
- }
- paths := strings.Split(str, ",")
-
- fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
- list := m.Mutable(fd).List()
-
- for _, s0 := range paths {
- s := strs.JSONSnakeCase(s0)
- if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
- return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
- }
- list.Append(protoreflect.ValueOfString(s))
- }
- return nil
-}
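
The snake_case/lowerCamelCase round-trip is observable from the public API. A sketch (the paths are illustrative):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/types/known/fieldmaskpb"
    )

    func main() {
    	fm := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name", "photo"}}
    	b, err := protojson.Marshal(fm)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(b)) // "user.displayName,photo"

    	var back fieldmaskpb.FieldMask
    	if err := protojson.Unmarshal(b, &back); err != nil {
    		panic(err)
    	}
    	fmt.Println(back.Paths) // [user.display_name photo]
    }
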
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
deleted file mode 100644
index bf1aba0e8..000000000
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package editionssupport defines constants for editions that are supported.
-package editionssupport
-
-import "google.golang.org/protobuf/types/descriptorpb"
-
-const (
- Minimum = descriptorpb.Edition_EDITION_PROTO2
- Maximum = descriptorpb.Edition_EDITION_2023
-
- // MaximumKnown is the maximum edition that is known to Go Protobuf, but not
- // declared as supported. In other words: end users cannot use it, but
- // testprotos inside Go Protobuf can.
- MaximumKnown = descriptorpb.Edition_EDITION_2024
-)
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
deleted file mode 100644
index ea1d3e65a..000000000
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "bytes"
- "fmt"
- "io"
- "regexp"
- "unicode/utf8"
-
- "google.golang.org/protobuf/internal/errors"
-)
-
-// call specifies which Decoder method was invoked.
-type call uint8
-
-const (
- readCall call = iota
- peekCall
-)
-
-const unexpectedFmt = "unexpected token %s"
-
-// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
-var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
-
-// Decoder is a token-based JSON decoder.
-type Decoder struct {
- // lastCall is last method called, either readCall or peekCall.
- // Initial value is readCall.
- lastCall call
-
- // lastToken contains the last read token.
- lastToken Token
-
- // lastErr contains the last read error.
- lastErr error
-
- // openStack is a stack containing ObjectOpen and ArrayOpen values. The
- // top of stack represents the object or the array the current value is
- // directly located in.
- openStack []Kind
-
- // orig is used in reporting line and column.
- orig []byte
- // in contains the unconsumed input.
- in []byte
-}
-
-// NewDecoder returns a Decoder to read the given []byte.
-func NewDecoder(b []byte) *Decoder {
- return &Decoder{orig: b, in: b}
-}
-
-// Peek looks ahead and returns the next token without advancing the read.
-func (d *Decoder) Peek() (Token, error) {
- defer func() { d.lastCall = peekCall }()
- if d.lastCall == readCall {
- d.lastToken, d.lastErr = d.Read()
- }
- return d.lastToken, d.lastErr
-}
-
-// Read returns the next JSON token.
-// It will return an error if there is no valid token.
-func (d *Decoder) Read() (Token, error) {
- const scalar = Null | Bool | Number | String
-
- defer func() { d.lastCall = readCall }()
- if d.lastCall == peekCall {
- return d.lastToken, d.lastErr
- }
-
- tok, err := d.parseNext()
- if err != nil {
- return Token{}, err
- }
-
- switch tok.kind {
- case EOF:
- if len(d.openStack) != 0 ||
- d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
- return Token{}, ErrUnexpectedEOF
- }
-
- case Null:
- if !d.isValueNext() {
- return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
- }
-
- case Bool, Number:
- if !d.isValueNext() {
- return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
- }
-
- case String:
- if d.isValueNext() {
- break
- }
- // This string token should only be for a field name.
- if d.lastToken.kind&(ObjectOpen|comma) == 0 {
- return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
- }
- if len(d.in) == 0 {
- return Token{}, ErrUnexpectedEOF
- }
- if c := d.in[0]; c != ':' {
- return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c))
- }
- tok.kind = Name
- d.consume(1)
-
- case ObjectOpen, ArrayOpen:
- if !d.isValueNext() {
- return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
- }
- d.openStack = append(d.openStack, tok.kind)
-
- case ObjectClose:
- if len(d.openStack) == 0 ||
- d.lastToken.kind&(Name|comma) != 0 ||
- d.openStack[len(d.openStack)-1] != ObjectOpen {
- return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
- }
- d.openStack = d.openStack[:len(d.openStack)-1]
-
- case ArrayClose:
- if len(d.openStack) == 0 ||
- d.lastToken.kind == comma ||
- d.openStack[len(d.openStack)-1] != ArrayOpen {
- return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
- }
- d.openStack = d.openStack[:len(d.openStack)-1]
-
- case comma:
- if len(d.openStack) == 0 ||
- d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
- return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
- }
- }
-
- // Update d.lastToken only after validating token to be in the right sequence.
- d.lastToken = tok
-
- if d.lastToken.kind == comma {
- return d.Read()
- }
- return tok, nil
-}
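
Since this package is internal, only code inside it can consume the token stream. A hypothetical in-package sketch of what Read yields:

    package json

    import "fmt"

    // exampleTokens is a hypothetical in-package illustration, not part of the API.
    func exampleTokens() {
    	d := NewDecoder([]byte(`{"a": [1, true]}`))
    	for {
    		tok, err := d.Read()
    		if err != nil {
    			panic(err)
    		}
    		if tok.Kind() == EOF {
    			break
    		}
    		// Commas never surface: Read consumes them and returns the next token.
    		fmt.Println(tok.Kind(), tok.RawString())
    	}
    	// Kinds printed, in order: {  name  [  number  bool  ]  }
    }
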
-
-// Any sequence that looks like a non-delimiter (for error reporting).
-var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`)
-
-// parseNext parses for the next JSON token. It returns a Token object for
-// different types, except for Name. It does not handle whether the next token
-// is in a valid sequence or not.
-func (d *Decoder) parseNext() (Token, error) {
- // Trim leading spaces.
- d.consume(0)
-
- in := d.in
- if len(in) == 0 {
- return d.consumeToken(EOF, 0), nil
- }
-
- switch in[0] {
- case 'n':
- if n := matchWithDelim("null", in); n != 0 {
- return d.consumeToken(Null, n), nil
- }
-
- case 't':
- if n := matchWithDelim("true", in); n != 0 {
- return d.consumeBoolToken(true, n), nil
- }
-
- case 'f':
- if n := matchWithDelim("false", in); n != 0 {
- return d.consumeBoolToken(false, n), nil
- }
-
- case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- if n, ok := parseNumber(in); ok {
- return d.consumeToken(Number, n), nil
- }
-
- case '"':
- s, n, err := d.parseString(in)
- if err != nil {
- return Token{}, err
- }
- return d.consumeStringToken(s, n), nil
-
- case '{':
- return d.consumeToken(ObjectOpen, 1), nil
-
- case '}':
- return d.consumeToken(ObjectClose, 1), nil
-
- case '[':
- return d.consumeToken(ArrayOpen, 1), nil
-
- case ']':
- return d.consumeToken(ArrayClose, 1), nil
-
- case ',':
- return d.consumeToken(comma, 1), nil
- }
- return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
-}
-
-// newSyntaxError returns an error with line and column information useful for
-// syntax errors.
-func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
- e := errors.New(f, x...)
- line, column := d.Position(pos)
- return errors.New("syntax error (line %d:%d): %v", line, column, e)
-}
-
-// Position returns line and column number of given index of the original input.
-// It will panic if index is out of range.
-func (d *Decoder) Position(idx int) (line int, column int) {
- b := d.orig[:idx]
- line = bytes.Count(b, []byte("\n")) + 1
- if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
- b = b[i+1:]
- }
- column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
- return line, column
-}
-
-// currPos returns the current index position of d.in from d.orig.
-func (d *Decoder) currPos() int {
- return len(d.orig) - len(d.in)
-}
-
-// matchWithDelim matches s with the input b and verifies that the match
-// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
-// As a special case, EOF is considered a delimiter. It returns the length of s
-// if there is a match, else 0.
-func matchWithDelim(s string, b []byte) int {
- if !bytes.HasPrefix(b, []byte(s)) {
- return 0
- }
-
- n := len(s)
- if n < len(b) && isNotDelim(b[n]) {
- return 0
- }
- return n
-}
-
-// isNotDelim returns true if the given byte is not a delimiter character.
-func isNotDelim(c byte) bool {
- return (c == '-' || c == '+' || c == '.' || c == '_' ||
- ('a' <= c && c <= 'z') ||
- ('A' <= c && c <= 'Z') ||
- ('0' <= c && c <= '9'))
-}
-
-// consume consumes n bytes of input and any subsequent whitespace.
-func (d *Decoder) consume(n int) {
- d.in = d.in[n:]
- for len(d.in) > 0 {
- switch d.in[0] {
- case ' ', '\n', '\r', '\t':
- d.in = d.in[1:]
- default:
- return
- }
- }
-}
-
-// isValueNext returns true if next type should be a JSON value: Null,
-// Number, String or Bool.
-func (d *Decoder) isValueNext() bool {
- if len(d.openStack) == 0 {
- return d.lastToken.kind == 0
- }
-
- start := d.openStack[len(d.openStack)-1]
- switch start {
- case ObjectOpen:
- return d.lastToken.kind&Name != 0
- case ArrayOpen:
- return d.lastToken.kind&(ArrayOpen|comma) != 0
- }
- panic(fmt.Sprintf(
- "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
- d.lastToken.kind, start))
-}
-
-// consumeToken constructs a Token for given Kind with raw value derived from
-// current d.in and the given size, and consumes that many bytes of input.
-func (d *Decoder) consumeToken(kind Kind, size int) Token {
- tok := Token{
- kind: kind,
- raw: d.in[:size],
- pos: len(d.orig) - len(d.in),
- }
- d.consume(size)
- return tok
-}
-
-// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
-// current d.in and given size.
-func (d *Decoder) consumeBoolToken(b bool, size int) Token {
- tok := Token{
- kind: Bool,
- raw: d.in[:size],
- pos: len(d.orig) - len(d.in),
- boo: b,
- }
- d.consume(size)
- return tok
-}
-
-// consumeStringToken constructs a Token for a String kind with raw value derived
-// from current d.in and given size.
-func (d *Decoder) consumeStringToken(s string, size int) Token {
- tok := Token{
- kind: String,
- raw: d.in[:size],
- pos: len(d.orig) - len(d.in),
- str: s,
- }
- d.consume(size)
- return tok
-}
-
-// Clone returns a copy of the Decoder for use in reading ahead the next JSON
-// object, array or other values without affecting current Decoder.
-func (d *Decoder) Clone() *Decoder {
- ret := *d
- ret.openStack = append([]Kind(nil), ret.openStack...)
- return &ret
-}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
deleted file mode 100644
index 2999d7133..000000000
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "bytes"
- "strconv"
-)
-
-// parseNumber reads the given []byte for a valid JSON number. If it is valid,
-// it returns the number of bytes. Parsing logic follows the definition in
-// https://tools.ietf.org/html/rfc7159#section-6, and is based on the
-// encoding/json.isValidNumber function.
-func parseNumber(input []byte) (int, bool) {
- var n int
-
- s := input
- if len(s) == 0 {
- return 0, false
- }
-
- // Optional -
- if s[0] == '-' {
- s = s[1:]
- n++
- if len(s) == 0 {
- return 0, false
- }
- }
-
- // Digits
- switch {
- case s[0] == '0':
- s = s[1:]
- n++
-
- case '1' <= s[0] && s[0] <= '9':
- s = s[1:]
- n++
- for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
- s = s[1:]
- n++
- }
-
- default:
- return 0, false
- }
-
- // . followed by 1 or more digits.
- if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
- s = s[2:]
- n += 2
- for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
- s = s[1:]
- n++
- }
- }
-
- // e or E followed by an optional - or + and
- // 1 or more digits.
- if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
- s = s[1:]
- n++
- if s[0] == '+' || s[0] == '-' {
- s = s[1:]
- n++
- if len(s) == 0 {
- return 0, false
- }
- }
- for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
- s = s[1:]
- n++
- }
- }
-
- // Check that next byte is a delimiter or it is at the end.
- if n < len(input) && isNotDelim(input[n]) {
- return 0, false
- }
-
- return n, true
-}
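
A hypothetical in-package test exercising the grammar described above (accepted and rejected shapes follow directly from the parsing steps):

    package json

    import "testing"

    func TestParseNumberSketch(t *testing.T) {
    	cases := []struct {
    		in string
    		n  int
    		ok bool
    	}{
    		{"0", 1, true},
    		{"-1.5e+2", 7, true},
    		{"01", 0, false}, // no leading zeros
    		{"1.", 0, false}, // '.' must be followed by a digit
    		{"1e", 0, false}, // an exponent needs at least one digit
    	}
    	for _, c := range cases {
    		if n, ok := parseNumber([]byte(c.in)); n != c.n || ok != c.ok {
    			t.Errorf("parseNumber(%q) = (%d, %v), want (%d, %v)", c.in, n, ok, c.n, c.ok)
    		}
    	}
    }
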
-
-// numberParts is the result of parsing out a valid JSON number. It contains
-// the parts of a number. The parts are used for integer conversion.
-type numberParts struct {
- neg bool
- intp []byte
- frac []byte
- exp []byte
-}
-
-// parseNumberParts constructs numberParts from the given []byte. The logic here
-// is similar to parseNumber above, with the difference of having to construct
-// numberParts. The slice fields in numberParts are subslices of the input.
-func parseNumberParts(input []byte) (numberParts, bool) {
- var neg bool
- var intp []byte
- var frac []byte
- var exp []byte
-
- s := input
- if len(s) == 0 {
- return numberParts{}, false
- }
-
- // Optional -
- if s[0] == '-' {
- neg = true
- s = s[1:]
- if len(s) == 0 {
- return numberParts{}, false
- }
- }
-
- // Digits
- switch {
- case s[0] == '0':
- // Skip first 0 and no need to store.
- s = s[1:]
-
- case '1' <= s[0] && s[0] <= '9':
- intp = s
- n := 1
- s = s[1:]
- for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
- s = s[1:]
- n++
- }
- intp = intp[:n]
-
- default:
- return numberParts{}, false
- }
-
- // . followed by 1 or more digits.
- if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
- frac = s[1:]
- n := 1
- s = s[2:]
- for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
- s = s[1:]
- n++
- }
- frac = frac[:n]
- }
-
- // e or E followed by an optional - or + and
- // 1 or more digits.
- if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
- s = s[1:]
- exp = s
- n := 0
- if s[0] == '+' || s[0] == '-' {
- s = s[1:]
- n++
- if len(s) == 0 {
- return numberParts{}, false
- }
- }
- for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
- s = s[1:]
- n++
- }
- exp = exp[:n]
- }
-
- return numberParts{
- neg: neg,
- intp: intp,
- frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
- exp: exp,
- }, true
-}
-
-// normalizeToIntString returns an integer string in normal form without the
-// E-notation for given numberParts. It will return false if it is not an
-// integer or if the exponent exceeds the max/min int value.
-func normalizeToIntString(n numberParts) (string, bool) {
- intpSize := len(n.intp)
- fracSize := len(n.frac)
-
- if intpSize == 0 && fracSize == 0 {
- return "0", true
- }
-
- var exp int
- if len(n.exp) > 0 {
- i, err := strconv.ParseInt(string(n.exp), 10, 32)
- if err != nil {
- return "", false
- }
- exp = int(i)
- }
-
- var num []byte
- if exp >= 0 {
- // For positive E, shift fraction digits into integer part and also pad
- // with zeroes as needed.
-
- // If there are more digits in fraction than the E value, then the
- // number is not an integer.
- if fracSize > exp {
- return "", false
- }
-
- // Make sure resulting digits are within max value limit to avoid
- // unnecessarily constructing a large byte slice that may simply fail
- // later on.
- const maxDigits = 20 // Max uint64 value has 20 decimal digits.
- if intpSize+exp > maxDigits {
- return "", false
- }
-
- // Set cap to make a copy of integer part when appended.
- num = n.intp[:len(n.intp):len(n.intp)]
- num = append(num, n.frac...)
- for i := 0; i < exp-fracSize; i++ {
- num = append(num, '0')
- }
- } else {
- // For negative E, shift digits in integer part out.
-
- // If there are fractions, then the number is not an integer.
- if fracSize > 0 {
- return "", false
- }
-
- // index is where the decimal point will be after adjusting for negative
- // exponent.
- index := intpSize + exp
- if index < 0 {
- return "", false
- }
-
- num = n.intp
- // If any of the digits being shifted to the right of the decimal point
- // is non-zero, then the number is not an integer.
- for i := index; i < intpSize; i++ {
- if num[i] != '0' {
- return "", false
- }
- }
- num = num[:index]
- }
-
- if n.neg {
- return "-" + string(num), true
- }
- return string(num), true
-}
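
A hypothetical in-package test for the E-notation normalization above; the expected results follow from tracing the shift logic:

    package json

    import "testing"

    func TestNormalizeToIntStringSketch(t *testing.T) {
    	cases := []struct {
    		in  string
    		out string
    		ok  bool
    	}{
    		{"1.5e3", "1500", true}, // fraction shifted into the integer part
    		{"100e-2", "1", true},   // trailing zeros shifted out
    		{"1e-2", "", false},     // would leave a fractional digit
    		{"1.5", "", false},      // a plain fraction is not an integer
    	}
    	for _, c := range cases {
    		parts, ok := parseNumberParts([]byte(c.in))
    		if !ok {
    			t.Fatalf("parseNumberParts(%q) failed", c.in)
    		}
    		out, ok := normalizeToIntString(parts)
    		if out != c.out || ok != c.ok {
    			t.Errorf("normalizeToIntString(%q) = (%q, %v), want (%q, %v)",
    				c.in, out, ok, c.out, c.ok)
    		}
    	}
    }
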
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
deleted file mode 100644
index f7fea7d8d..000000000
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "strconv"
- "unicode"
- "unicode/utf16"
- "unicode/utf8"
-
- "google.golang.org/protobuf/internal/strs"
-)
-
-func (d *Decoder) parseString(in []byte) (string, int, error) {
- in0 := in
- if len(in) == 0 {
- return "", 0, ErrUnexpectedEOF
- }
- if in[0] != '"' {
- return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0])
- }
- in = in[1:]
- i := indexNeedEscapeInBytes(in)
- in, out := in[i:], in[:i:i] // set cap to prevent mutations
- for len(in) > 0 {
- switch r, n := utf8.DecodeRune(in); {
- case r == utf8.RuneError && n == 1:
- return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string")
- case r < ' ':
- return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r)
- case r == '"':
- in = in[1:]
- n := len(in0) - len(in)
- return string(out), n, nil
- case r == '\\':
- if len(in) < 2 {
- return "", 0, ErrUnexpectedEOF
- }
- switch r := in[1]; r {
- case '"', '\\', '/':
- in, out = in[2:], append(out, r)
- case 'b':
- in, out = in[2:], append(out, '\b')
- case 'f':
- in, out = in[2:], append(out, '\f')
- case 'n':
- in, out = in[2:], append(out, '\n')
- case 'r':
- in, out = in[2:], append(out, '\r')
- case 't':
- in, out = in[2:], append(out, '\t')
- case 'u':
- if len(in) < 6 {
- return "", 0, ErrUnexpectedEOF
- }
- v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
- if err != nil {
- return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
- }
- in = in[6:]
-
- r := rune(v)
- if utf16.IsSurrogate(r) {
- if len(in) < 6 {
- return "", 0, ErrUnexpectedEOF
- }
- v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
- r = utf16.DecodeRune(r, rune(v))
- if in[0] != '\\' || in[1] != 'u' ||
- r == unicode.ReplacementChar || err != nil {
- return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
- }
- in = in[6:]
- }
- out = append(out, string(r)...)
- default:
- return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2])
- }
- default:
- i := indexNeedEscapeInBytes(in[n:])
- in, out = in[n+i:], append(out, in[:n+i]...)
- }
- }
- return "", 0, ErrUnexpectedEOF
-}
-
-// indexNeedEscapeInBytes returns the index of the character that needs
-// escaping. If no characters need escaping, this returns the input length.
-func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
deleted file mode 100644
index 50578d659..000000000
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "bytes"
- "fmt"
- "strconv"
-)
-
-// Kind represents a token kind expressible in the JSON format.
-type Kind uint16
-
-const (
- Invalid Kind = (1 << iota) / 2
- EOF
- Null
- Bool
- Number
- String
- Name
- ObjectOpen
- ObjectClose
- ArrayOpen
- ArrayClose
-
- // comma is only for parsing in between tokens and
- // does not need to be exported.
- comma
-)
-
-func (k Kind) String() string {
- switch k {
- case EOF:
- return "eof"
- case Null:
- return "null"
- case Bool:
- return "bool"
- case Number:
- return "number"
- case String:
- return "string"
- case ObjectOpen:
- return "{"
- case ObjectClose:
- return "}"
- case Name:
- return "name"
- case ArrayOpen:
- return "["
- case ArrayClose:
- return "]"
- case comma:
- return ","
- }
- return ""
-}
-
-// Token provides a parsed token kind and value.
-//
-// Values are provided by the different accessor methods. The accessor methods
-// Name, Bool, and ParsedString will panic if called on the wrong kind. There
-// are different accessor methods for the Number kind for converting to the
-// appropriate Go numeric type and those methods have the ok return value.
-type Token struct {
- // Token kind.
- kind Kind
- // pos provides the position of the token in the original input.
- pos int
- // raw bytes of the serialized token.
- // This is a subslice into the original input.
- raw []byte
- // boo is parsed boolean value.
- boo bool
- // str is parsed string value.
- str string
-}
-
-// Kind returns the token kind.
-func (t Token) Kind() Kind {
- return t.kind
-}
-
-// RawString returns the read value in string.
-func (t Token) RawString() string {
- return string(t.raw)
-}
-
-// Pos returns the token position from the input.
-func (t Token) Pos() int {
- return t.pos
-}
-
-// Name returns the object name if token is Name, else it panics.
-func (t Token) Name() string {
- if t.kind == Name {
- return t.str
- }
- panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
-}
-
-// Bool returns the bool value if token kind is Bool, else it panics.
-func (t Token) Bool() bool {
- if t.kind == Bool {
- return t.boo
- }
- panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
-}
-
-// ParsedString returns the string value if the token kind is String, else it
-// panics.
-func (t Token) ParsedString() string {
- if t.kind == String {
- return t.str
- }
- panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
-}
-
-// Float returns the floating-point number if token kind is Number.
-//
-// The floating-point precision is specified by the bitSize parameter: 32 for
-// float32 or 64 for float64. If bitSize=32, the result still has type float64,
-// but it will be convertible to float32 without changing its value. It will
-// return false if the number exceeds the floating point limits for given
-// bitSize.
-func (t Token) Float(bitSize int) (float64, bool) {
- if t.kind != Number {
- return 0, false
- }
- f, err := strconv.ParseFloat(t.RawString(), bitSize)
- if err != nil {
- return 0, false
- }
- return f, true
-}
-
-// Int returns the signed integer number if token is Number.
-//
-// The given bitSize specifies the integer type that the result must fit into.
-// It returns false if the number is not an integer value or if the result
-// exceeds the limits for given bitSize.
-func (t Token) Int(bitSize int) (int64, bool) {
- s, ok := t.getIntStr()
- if !ok {
- return 0, false
- }
- n, err := strconv.ParseInt(s, 10, bitSize)
- if err != nil {
- return 0, false
- }
- return n, true
-}
-
-// Uint returns the unsigned integer number if token is Number.
-//
-// The given bitSize specifies the unsigned integer type that the result must
-// fit into. It returns false if the number is not an unsigned integer value
-// or if the result exceeds the limits for given bitSize.
-func (t Token) Uint(bitSize int) (uint64, bool) {
- s, ok := t.getIntStr()
- if !ok {
- return 0, false
- }
- n, err := strconv.ParseUint(s, 10, bitSize)
- if err != nil {
- return 0, false
- }
- return n, true
-}
-
-func (t Token) getIntStr() (string, bool) {
- if t.kind != Number {
- return "", false
- }
- parts, ok := parseNumberParts(t.raw)
- if !ok {
- return "", false
- }
- return normalizeToIntString(parts)
-}
-
-// TokenEquals returns true if the given Tokens are equal, else false.
-func TokenEquals(x, y Token) bool {
- return x.kind == y.kind &&
- x.pos == y.pos &&
- bytes.Equal(x.raw, y.raw) &&
- x.boo == y.boo &&
- x.str == y.str
-}
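
The Token API above separates kind inspection from value extraction: Name, Bool, and ParsedString panic on the wrong kind, while the Number accessors report an ok result instead. Below is a minimal sketch of a consumer that dispatches on Kind before touching any panicking accessor. It is illustrative only: this json package lives under internal/ and cannot be imported from outside the protobuf module, and the describe helper is hypothetical.

package main

import (
	"strconv"

	"google.golang.org/protobuf/internal/encoding/json"
)

// describe renders one token for debugging, checking Kind before calling
// any accessor that would panic on a mismatched kind.
func describe(tok json.Token) string {
	switch tok.Kind() {
	case json.Name:
		return "name " + tok.Name()
	case json.String:
		return "string " + tok.ParsedString()
	case json.Bool:
		return "bool " + strconv.FormatBool(tok.Bool())
	case json.Number:
		if n, ok := tok.Int(64); ok {
			return "int " + strconv.FormatInt(n, 10)
		}
		if f, ok := tok.Float(64); ok {
			return "float " + strconv.FormatFloat(f, 'g', -1, 64)
		}
		return "bad number " + tok.RawString()
	default:
		// Punctuation, null, and EOF carry no parsed value; RawString
		// returns the raw input bytes for the token.
		return tok.RawString()
	}
}
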
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
deleted file mode 100644
index 934f2dcb3..000000000
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
- "math"
- "math/bits"
- "strconv"
- "strings"
- "unicode/utf8"
-
- "google.golang.org/protobuf/internal/detrand"
- "google.golang.org/protobuf/internal/errors"
-)
-
-// kind represents an encoding type.
-type kind uint8
-
-const (
- _ kind = (1 << iota) / 2
- name
- scalar
- objectOpen
- objectClose
- arrayOpen
- arrayClose
-)
-
-// Encoder provides methods to write out JSON constructs and values. The user is
-// responsible for producing valid sequences of JSON constructs and values.
-type Encoder struct {
- indent string
- lastKind kind
- indents []byte
- out []byte
-}
-
-// NewEncoder returns an Encoder.
-//
-// If indent is a non-empty string, it causes every entry for an Array or Object
-// to be preceded by the indent and trailed by a newline.
-func NewEncoder(buf []byte, indent string) (*Encoder, error) {
- e := &Encoder{
- out: buf,
- }
- if len(indent) > 0 {
- if strings.Trim(indent, " \t") != "" {
- return nil, errors.New("indent may only be composed of space or tab characters")
- }
- e.indent = indent
- }
- return e, nil
-}
-
-// Bytes returns the bytes written so far.
-func (e *Encoder) Bytes() []byte {
- return e.out
-}
-
-// WriteNull writes out the null value.
-func (e *Encoder) WriteNull() {
- e.prepareNext(scalar)
- e.out = append(e.out, "null"...)
-}
-
-// WriteBool writes out the given boolean value.
-func (e *Encoder) WriteBool(b bool) {
- e.prepareNext(scalar)
- if b {
- e.out = append(e.out, "true"...)
- } else {
- e.out = append(e.out, "false"...)
- }
-}
-
-// WriteString writes out the given string as a JSON string value. It returns
-// an error if the input string contains invalid UTF-8.
-func (e *Encoder) WriteString(s string) error {
- e.prepareNext(scalar)
- var err error
- if e.out, err = appendString(e.out, s); err != nil {
- return err
- }
- return nil
-}
-
-// Sentinel error used for indicating invalid UTF-8.
-var errInvalidUTF8 = errors.New("invalid UTF-8")
-
-func appendString(out []byte, in string) ([]byte, error) {
- out = append(out, '"')
- i := indexNeedEscapeInString(in)
- in, out = in[i:], append(out, in[:i]...)
- for len(in) > 0 {
- switch r, n := utf8.DecodeRuneInString(in); {
- case r == utf8.RuneError && n == 1:
- return out, errInvalidUTF8
- case r < ' ' || r == '"' || r == '\\':
- out = append(out, '\\')
- switch r {
- case '"', '\\':
- out = append(out, byte(r))
- case '\b':
- out = append(out, 'b')
- case '\f':
- out = append(out, 'f')
- case '\n':
- out = append(out, 'n')
- case '\r':
- out = append(out, 'r')
- case '\t':
- out = append(out, 't')
- default:
- out = append(out, 'u')
- out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
- out = strconv.AppendUint(out, uint64(r), 16)
- }
- in = in[n:]
- default:
- i := indexNeedEscapeInString(in[n:])
- in, out = in[n+i:], append(out, in[:n+i]...)
- }
- }
- out = append(out, '"')
- return out, nil
-}
-
-// indexNeedEscapeInString returns the index of the character that needs
-// escaping. If no characters need escaping, this returns the input length.
-func indexNeedEscapeInString(s string) int {
- for i, r := range s {
- if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError {
- return i
- }
- }
- return len(s)
-}
-
-// WriteFloat writes out the given float as a JSON number value, formatted
-// according to the given bitSize.
-func (e *Encoder) WriteFloat(n float64, bitSize int) {
- e.prepareNext(scalar)
- e.out = appendFloat(e.out, n, bitSize)
-}
-
-// appendFloat formats given float in bitSize, and appends to the given []byte.
-func appendFloat(out []byte, n float64, bitSize int) []byte {
- switch {
- case math.IsNaN(n):
- return append(out, `"NaN"`...)
- case math.IsInf(n, +1):
- return append(out, `"Infinity"`...)
- case math.IsInf(n, -1):
- return append(out, `"-Infinity"`...)
- }
-
- // JSON number formatting logic based on encoding/json.
- // See floatEncoder.encode for reference.
- fmt := byte('f')
- if abs := math.Abs(n); abs != 0 {
- if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) ||
- bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
- fmt = 'e'
- }
- }
- out = strconv.AppendFloat(out, n, fmt, -1, bitSize)
- if fmt == 'e' {
- n := len(out)
- if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
- out[n-2] = out[n-1]
- out = out[:n-1]
- }
- }
- return out
-}
-
-// WriteInt writes out the given signed integer as a JSON number value.
-func (e *Encoder) WriteInt(n int64) {
- e.prepareNext(scalar)
- e.out = strconv.AppendInt(e.out, n, 10)
-}
-
-// WriteUint writes out the given unsigned integer as a JSON number value.
-func (e *Encoder) WriteUint(n uint64) {
- e.prepareNext(scalar)
- e.out = strconv.AppendUint(e.out, n, 10)
-}
-
-// StartObject writes out the '{' symbol.
-func (e *Encoder) StartObject() {
- e.prepareNext(objectOpen)
- e.out = append(e.out, '{')
-}
-
-// EndObject writes out the '}' symbol.
-func (e *Encoder) EndObject() {
- e.prepareNext(objectClose)
- e.out = append(e.out, '}')
-}
-
-// WriteName writes out the given string as a JSON string value followed by
-// the name separator ':'. It returns an error if the input string contains
-// invalid UTF-8, which is unlikely since protobuf field names are expected
-// to be valid.
-func (e *Encoder) WriteName(s string) error {
- e.prepareNext(name)
- var err error
- // Append to output regardless of error.
- e.out, err = appendString(e.out, s)
- e.out = append(e.out, ':')
- return err
-}
-
-// StartArray writes out the '[' symbol.
-func (e *Encoder) StartArray() {
- e.prepareNext(arrayOpen)
- e.out = append(e.out, '[')
-}
-
-// EndArray writes out the ']' symbol.
-func (e *Encoder) EndArray() {
- e.prepareNext(arrayClose)
- e.out = append(e.out, ']')
-}
-
-// prepareNext adds a comma and indentation for the next value as needed,
-// based on the last kind written and the indent option. It also updates
-// lastKind to next.
-func (e *Encoder) prepareNext(next kind) {
- defer func() {
- // Set lastKind to next.
- e.lastKind = next
- }()
-
- if len(e.indent) == 0 {
- // A comma is needed under the following condition.
- if e.lastKind&(scalar|objectClose|arrayClose) != 0 &&
- next&(name|scalar|objectOpen|arrayOpen) != 0 {
- e.out = append(e.out, ',')
- // For single-line output, add a random extra space after each
- // comma to make output unstable.
- if detrand.Bool() {
- e.out = append(e.out, ' ')
- }
- }
- return
- }
-
- switch {
- case e.lastKind&(objectOpen|arrayOpen) != 0:
- // If next type is NOT closing, add indent and newline.
- if next&(objectClose|arrayClose) == 0 {
- e.indents = append(e.indents, e.indent...)
- e.out = append(e.out, '\n')
- e.out = append(e.out, e.indents...)
- }
-
- case e.lastKind&(scalar|objectClose|arrayClose) != 0:
- switch {
- // If next type is either a value or name, add comma and newline.
- case next&(name|scalar|objectOpen|arrayOpen) != 0:
- e.out = append(e.out, ',', '\n')
-
- // If next type is a closing object or array, adjust indentation.
- case next&(objectClose|arrayClose) != 0:
- e.indents = e.indents[:len(e.indents)-len(e.indent)]
- e.out = append(e.out, '\n')
- }
- e.out = append(e.out, e.indents...)
-
- case e.lastKind&name != 0:
- e.out = append(e.out, ' ')
- // For multi-line output, add a random extra space after key: to make
- // output unstable.
- if detrand.Bool() {
- e.out = append(e.out, ' ')
- }
- }
-}
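
The write methods above are order-driven: the caller emits tokens in sequence and prepareNext supplies the commas and indentation. A usage sketch, illustrative only since this package is internal to the protobuf module:

package main

import (
	"fmt"

	"google.golang.org/protobuf/internal/encoding/json"
)

func main() {
	e, err := json.NewEncoder(nil, "  ") // two-space indent enables multi-line output
	if err != nil {
		panic(err) // only possible if indent contains non-space/tab characters
	}
	e.StartObject()
	_ = e.WriteName("name") // errors only on invalid UTF-8
	_ = e.WriteString("x")
	_ = e.WriteName("values")
	e.StartArray()
	e.WriteInt(1)
	e.WriteInt(2)
	e.EndArray()
	e.EndObject()
	fmt.Println(string(e.Bytes()))
}

Note that detrand deliberately injects an occasional extra space after commas and after name separators, so the exact output is unstable by design: callers must not depend on the serialized form byte-for-byte.
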
diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go
deleted file mode 100644
index ea276d15a..000000000
--- a/vendor/google.golang.org/protobuf/protoadapt/convert.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package protoadapt bridges the original and new proto APIs.
-package protoadapt
-
-import (
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type.
-type MessageV1 = protoiface.MessageV1
-
-// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the
-// current [google.golang.org/protobuf] module, adding support for reflection.
-type MessageV2 = proto.Message
-
-// MessageV1Of converts a v2 message to a v1 message.
-// It returns nil if m is nil.
-func MessageV1Of(m MessageV2) MessageV1 {
- return protoimpl.X.ProtoMessageV1Of(m)
-}
-
-// MessageV2Of converts a v1 message to a v2 message.
-// It returns nil if m is nil.
-func MessageV2Of(m MessageV1) MessageV2 {
- return protoimpl.X.ProtoMessageV2Of(m)
-}
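
Unlike the internal packages above, protoadapt is public API. A small sketch of bridging between the legacy and current interfaces, using timestamppb purely as a convenient concrete message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	v2 := timestamppb.Now()          // any v2 proto.Message
	v1 := protoadapt.MessageV1Of(v2) // satisfies the legacy github.com/golang/protobuf interface
	rt := protoadapt.MessageV2Of(v1) // and back again, with reflection support
	fmt.Println(rt.ProtoReflect().Descriptor().FullName()) // google.protobuf.Timestamp
}
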
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
deleted file mode 100644
index 823dbf3ba..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package protodesc provides functionality for converting
-// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values.
-//
-// The google.protobuf.FileDescriptorProto is a protobuf message that describes
-// the type information for a .proto file in a form that is easily serializable.
-// The [protoreflect.FileDescriptor] is a more structured representation of
-// the FileDescriptorProto message where references and remote dependencies
-// can be directly followed.
-package protodesc
-
-import (
- "strings"
-
- "google.golang.org/protobuf/internal/editionssupport"
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/filedesc"
- "google.golang.org/protobuf/internal/pragma"
- "google.golang.org/protobuf/internal/strs"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-
- "google.golang.org/protobuf/types/descriptorpb"
-)
-
-// Resolver is the resolver used by [NewFile] to resolve dependencies.
-// The enums and messages provided must belong to some parent file,
-// which is also registered.
-//
-// It is implemented by [protoregistry.Files].
-type Resolver interface {
- FindFileByPath(string) (protoreflect.FileDescriptor, error)
- FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
-}
-
-// FileOptions configures the construction of file descriptors.
-type FileOptions struct {
- pragma.NoUnkeyedLiterals
-
- // AllowUnresolvable configures New to permissively allow unresolvable
- // file, enum, or message dependencies. Unresolved dependencies are replaced
- // by placeholder equivalents.
- //
- // The following dependencies may be left unresolved:
- // • Resolving an imported file.
- // • Resolving the type for a message field or extension field.
- // If the kind of the field is unknown, then a placeholder is used for both
- // the Enum and Message accessors on the protoreflect.FieldDescriptor.
- // • Resolving an enum value set as the default for an optional enum field.
- // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the
- // first value in the associated enum (or zero if the enum dependency is
- // also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue
- // is populated with a placeholder.
- // • Resolving the extended message type for an extension field.
- // • Resolving the input or output message type for a service method.
- //
- // If the unresolved dependency uses a relative name,
- // then the placeholder will contain an invalid FullName with a "*." prefix,
- // indicating that the starting prefix of the full name is unknown.
- AllowUnresolvable bool
-}
-
-// NewFile creates a new [protoreflect.FileDescriptor] from the provided
-// file descriptor message. See [FileOptions.New] for more information.
-func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
- return FileOptions{}.New(fd, r)
-}
-
-// NewFiles creates a new [protoregistry.Files] from the provided
-// FileDescriptorSet message. See [FileOptions.NewFiles] for more information.
-func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
- return FileOptions{}.NewFiles(fd)
-}
-
-// New creates a new [protoreflect.FileDescriptor] from the provided
-// file descriptor message. The file must represent a valid proto file according
-// to protobuf semantics. The returned descriptor is a deep copy of the input.
-//
-// Any imported files, enum types, or message types referenced in the file are
-// resolved using the provided registry. When looking up an import file path,
-// the path must be unique. The newly created file descriptor is not registered
-// back into the provided file registry.
-func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
- if r == nil {
- r = (*protoregistry.Files)(nil) // empty resolver
- }
-
- // Handle the file descriptor content.
- f := &filedesc.File{L2: &filedesc.FileL2{}}
- switch fd.GetSyntax() {
- case "proto2", "":
- f.L1.Syntax = protoreflect.Proto2
- f.L1.Edition = filedesc.EditionProto2
- case "proto3":
- f.L1.Syntax = protoreflect.Proto3
- f.L1.Edition = filedesc.EditionProto3
- case "editions":
- f.L1.Syntax = protoreflect.Editions
- f.L1.Edition = fromEditionProto(fd.GetEdition())
- default:
- return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
- }
- f.L1.Path = fd.GetName()
- if f.L1.Path == "" {
- return nil, errors.New("file path must be populated")
- }
- if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
- // Allow cmd/protoc-gen-go/testdata to use any edition for easier
- // testing of upcoming edition features.
- if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
- return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
- }
- }
- f.L1.Package = protoreflect.FullName(fd.GetPackage())
- if !f.L1.Package.IsValid() && f.L1.Package != "" {
- return nil, errors.New("invalid package: %q", f.L1.Package)
- }
- if opts := fd.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.FileOptions)
- f.L2.Options = func() protoreflect.ProtoMessage { return opts }
- }
- initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
-
- f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
- for _, i := range fd.GetPublicDependency() {
- if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic {
- return nil, errors.New("invalid or duplicate public import index: %d", i)
- }
- f.L2.Imports[i].IsPublic = true
- }
- imps := importSet{f.Path(): true}
- for i, path := range fd.GetDependency() {
- imp := &f.L2.Imports[i]
- f, err := r.FindFileByPath(path)
- if err == protoregistry.NotFound && o.AllowUnresolvable {
- f = filedesc.PlaceholderFile(path)
- } else if err != nil {
- return nil, errors.New("could not resolve import %q: %v", path, err)
- }
- imp.FileDescriptor = f
-
- if imps[imp.Path()] {
- return nil, errors.New("already imported %q", path)
- }
- imps[imp.Path()] = true
- }
- for i := range fd.GetDependency() {
- imp := &f.L2.Imports[i]
- imps.importPublic(imp.Imports())
- }
-
- // Handle source locations.
- f.L2.Locations.File = f
- for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
- var l protoreflect.SourceLocation
- // TODO: Validate that the path points to an actual declaration?
- l.Path = protoreflect.SourcePath(loc.GetPath())
- s := loc.GetSpan()
- switch len(s) {
- case 3:
- l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2])
- case 4:
- l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3])
- default:
- return nil, errors.New("invalid span: %v", s)
- }
- // TODO: Validate that the span information is sensible?
- // See https://github.com/protocolbuffers/protobuf/issues/6378.
- if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 ||
- (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) {
- return nil, errors.New("invalid span: %v", s)
- }
- l.LeadingDetachedComments = loc.GetLeadingDetachedComments()
- l.LeadingComments = loc.GetLeadingComments()
- l.TrailingComments = loc.GetTrailingComments()
- f.L2.Locations.List = append(f.L2.Locations.List, l)
- }
-
- // Step 1: Allocate and derive the names for all declarations.
- // This copies all fields from the descriptor proto except:
- // google.protobuf.FieldDescriptorProto.type_name
- // google.protobuf.FieldDescriptorProto.default_value
- // google.protobuf.FieldDescriptorProto.oneof_index
- // google.protobuf.FieldDescriptorProto.extendee
- // google.protobuf.MethodDescriptorProto.input
- // google.protobuf.MethodDescriptorProto.output
- var err error
- sb := new(strs.Builder)
- r1 := make(descsByName)
- if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil {
- return nil, err
- }
- if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil {
- return nil, err
- }
- if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil {
- return nil, err
- }
- if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil {
- return nil, err
- }
-
- // Step 2: Resolve every dependency reference not handled by step 1.
- r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable}
- if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil {
- return nil, err
- }
- if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil {
- return nil, err
- }
- if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil {
- return nil, err
- }
-
- // Step 3: Validate every enum, message, and extension declaration.
- if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil {
- return nil, err
- }
- if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil {
- return nil, err
- }
- if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil {
- return nil, err
- }
-
- return f, nil
-}
-
-type importSet map[string]bool
-
-func (is importSet) importPublic(imps protoreflect.FileImports) {
- for i := 0; i < imps.Len(); i++ {
- if imp := imps.Get(i); imp.IsPublic {
- is[imp.Path()] = true
- is.importPublic(imp.Imports())
- }
- }
-}
-
-// NewFiles creates a new [protoregistry.Files] from the provided
-// FileDescriptorSet message. The descriptor set must include only
-// valid files according to protobuf semantics. The returned descriptors
-// are a deep copy of the input.
-func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
- files := make(map[string]*descriptorpb.FileDescriptorProto)
- for _, fd := range fds.File {
- if _, ok := files[fd.GetName()]; ok {
- return nil, errors.New("file appears multiple times: %q", fd.GetName())
- }
- files[fd.GetName()] = fd
- }
- r := &protoregistry.Files{}
- for _, fd := range files {
- if err := o.addFileDeps(r, fd, files); err != nil {
- return nil, err
- }
- }
- return r, nil
-}
-func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error {
- // Set the entry to nil while descending into a file's dependencies to detect cycles.
- files[fd.GetName()] = nil
- for _, dep := range fd.Dependency {
- depfd, ok := files[dep]
- if depfd == nil {
- if ok {
- return errors.New("import cycle in file: %q", dep)
- }
- continue
- }
- if err := o.addFileDeps(r, depfd, files); err != nil {
- return err
- }
- }
- // Delete the entry once dependencies are processed.
- delete(files, fd.GetName())
- f, err := o.New(fd, r)
- if err != nil {
- return err
- }
- return r.RegisterFile(f)
-}
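
A sketch of this constructor in use: build a minimal FileDescriptorProto by hand and convert it with protodesc.NewFile. With no imports to resolve, a nil Resolver suffices, since New substitutes an empty protoregistry.Files.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
		MessageType: []*descriptorpb.DescriptorProto{{
			Name: proto.String("Greeting"),
			Field: []*descriptorpb.FieldDescriptorProto{{
				Name:   proto.String("text"),
				Number: proto.Int32(1),
				Label:  descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
				Type:   descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
			}},
		}},
	}
	fd, err := protodesc.NewFile(fdp, nil) // nil resolver: no dependencies
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Messages().Get(0).FullName()) // example.Greeting
}
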
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
deleted file mode 100644
index 9da34998b..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protodesc
-
-import (
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/filedesc"
- "google.golang.org/protobuf/internal/strs"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- "google.golang.org/protobuf/types/descriptorpb"
-)
-
-type descsByName map[protoreflect.FullName]protoreflect.Descriptor
-
-func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) {
- es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers
- for i, ed := range eds {
- e := &es[i]
- e.L2 = new(filedesc.EnumL2)
- if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil {
- return nil, err
- }
- if opts := ed.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.EnumOptions)
- e.L2.Options = func() protoreflect.ProtoMessage { return opts }
- }
- e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures())
- for _, s := range ed.GetReservedName() {
- e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s))
- }
- for _, rr := range ed.GetReservedRange() {
- e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{
- protoreflect.EnumNumber(rr.GetStart()),
- protoreflect.EnumNumber(rr.GetEnd()),
- })
- }
- if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil {
- return nil, err
- }
- }
- return es, nil
-}
-
-func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) {
- vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers
- for i, vd := range vds {
- v := &vs[i]
- if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil {
- return nil, err
- }
- if opts := vd.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions)
- v.L1.Options = func() protoreflect.ProtoMessage { return opts }
- }
- v.L1.Number = protoreflect.EnumNumber(vd.GetNumber())
- }
- return vs, nil
-}
-
-func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) {
- ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers
- for i, md := range mds {
- m := &ms[i]
- m.L2 = new(filedesc.MessageL2)
- if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
- return nil, err
- }
- m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
- if opts := md.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
- m.L2.Options = func() protoreflect.ProtoMessage { return opts }
- m.L1.IsMapEntry = opts.GetMapEntry()
- m.L1.IsMessageSet = opts.GetMessageSetWireFormat()
- }
- for _, s := range md.GetReservedName() {
- m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s))
- }
- for _, rr := range md.GetReservedRange() {
- m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{
- protoreflect.FieldNumber(rr.GetStart()),
- protoreflect.FieldNumber(rr.GetEnd()),
- })
- }
- for _, xr := range md.GetExtensionRange() {
- m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{
- protoreflect.FieldNumber(xr.GetStart()),
- protoreflect.FieldNumber(xr.GetEnd()),
- })
- var optsFunc func() protoreflect.ProtoMessage
- if opts := xr.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions)
- optsFunc = func() protoreflect.ProtoMessage { return opts }
- }
- m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc)
- }
- if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil {
- return nil, err
- }
- if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil {
- return nil, err
- }
- if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil {
- return nil, err
- }
- if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil {
- return nil, err
- }
- if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil {
- return nil, err
- }
- }
- return ms, nil
-}
-
-// canBePacked returns whether the field can use packed encoding:
-// https://protobuf.dev/programming-guides/encoding/#packed
-func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool {
- if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED {
- return false // not a repeated field
- }
-
- switch protoreflect.Kind(fd.GetType()) {
- case protoreflect.MessageKind, protoreflect.GroupKind:
- return false // not a scalar type field
-
- case protoreflect.StringKind, protoreflect.BytesKind:
- // string and bytes cannot be declared as packed,
- // see https://protobuf.dev/programming-guides/encoding/#packed
- return false
-
- default:
- return true
- }
-}
-
-func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) {
- fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers
- for i, fd := range fds {
- f := &fs[i]
- if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil {
- return nil, err
- }
- f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures())
- f.L1.IsProto3Optional = fd.GetProto3Optional()
- if opts := fd.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
- f.L1.Options = func() protoreflect.ProtoMessage { return opts }
- f.L1.IsLazy = opts.GetLazy()
- if opts.Packed != nil {
- f.L1.EditionFeatures.IsPacked = opts.GetPacked()
- }
- }
- f.L1.Number = protoreflect.FieldNumber(fd.GetNumber())
- f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel())
- if fd.Type != nil {
- f.L1.Kind = protoreflect.Kind(fd.GetType())
- }
- if fd.JsonName != nil {
- f.L1.StringName.InitJSON(fd.GetJsonName())
- }
-
- if f.L1.EditionFeatures.IsLegacyRequired {
- f.L1.Cardinality = protoreflect.Required
- }
-
- if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded {
- f.L1.Kind = protoreflect.GroupKind
- }
- }
- return fs, nil
-}
-
-func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) {
- os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers
- for i, od := range ods {
- o := &os[i]
- if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil {
- return nil, err
- }
- o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures())
- if opts := od.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.OneofOptions)
- o.L1.Options = func() protoreflect.ProtoMessage { return opts }
- }
- }
- return os, nil
-}
-
-func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) {
- xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers
- for i, xd := range xds {
- x := &xs[i]
- x.L2 = new(filedesc.ExtensionL2)
- if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil {
- return nil, err
- }
- x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures())
- if opts := xd.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
- x.L2.Options = func() protoreflect.ProtoMessage { return opts }
- if opts.Packed != nil {
- x.L1.EditionFeatures.IsPacked = opts.GetPacked()
- }
- }
- x.L1.Number = protoreflect.FieldNumber(xd.GetNumber())
- x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel())
- if xd.Type != nil {
- x.L1.Kind = protoreflect.Kind(xd.GetType())
- }
- if xd.JsonName != nil {
- x.L2.StringName.InitJSON(xd.GetJsonName())
- }
- if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
- x.L1.Kind = protoreflect.GroupKind
- }
- }
- return xs, nil
-}
-
-func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) {
- ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers
- for i, sd := range sds {
- s := &ss[i]
- s.L2 = new(filedesc.ServiceL2)
- if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil {
- return nil, err
- }
- if opts := sd.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.ServiceOptions)
- s.L2.Options = func() protoreflect.ProtoMessage { return opts }
- }
- if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil {
- return nil, err
- }
- }
- return ss, nil
-}
-
-func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) {
- ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers
- for i, md := range mds {
- m := &ms[i]
- if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
- return nil, err
- }
- if opts := md.GetOptions(); opts != nil {
- opts = proto.Clone(opts).(*descriptorpb.MethodOptions)
- m.L1.Options = func() protoreflect.ProtoMessage { return opts }
- }
- m.L1.IsStreamingClient = md.GetClientStreaming()
- m.L1.IsStreamingServer = md.GetServerStreaming()
- }
- return ms, nil
-}
-
-func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) {
- if !protoreflect.Name(name).IsValid() {
- return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name)
- }
-
- // Derive the full name of the child.
- // Note that enum values are a sibling to the enum parent in the namespace.
- var fullName protoreflect.FullName
- if _, ok := parent.(protoreflect.EnumDescriptor); ok {
- fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name))
- } else {
- fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name))
- }
- if _, ok := r[fullName]; ok {
- return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName)
- }
- r[fullName] = child
-
- // TODO: Verify that the full name does not already exist in the resolver?
- // This is not as critical since most usages of NewFile will register
- // the created file back into the registry, which will perform this check.
-
- return filedesc.BaseL0{
- FullName: fullName,
- ParentFile: parent.ParentFile().(*filedesc.File),
- Parent: parent,
- Index: idx,
- }, nil
-}
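
A subtlety in makeBase worth spelling out: enum values are named as siblings of their enum rather than as children, matching C++ scoping rules. A standalone sketch of the rule; enumValueFullName is a hypothetical helper, not part of this package:

package main

import "strings"

// enumValueFullName mirrors the sibling rule in makeBase: a value's full name
// is formed from the enum's parent scope, so enum "pkg.Color" with value
// "RED" yields "pkg.RED", not "pkg.Color.RED".
func enumValueFullName(enumFullName, valueName string) string {
	if i := strings.LastIndexByte(enumFullName, '.'); i >= 0 {
		return enumFullName[:i] + "." + valueName
	}
	return valueName // enum declared at the top level of an unpackaged file
}
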
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
deleted file mode 100644
index ff692436e..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protodesc
-
-import (
- "google.golang.org/protobuf/internal/encoding/defval"
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/filedesc"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-
- "google.golang.org/protobuf/types/descriptorpb"
-)
-
-// resolver is a wrapper around a local registry of declarations within the file
-// and the remote resolver. The remote resolver is restricted to only return
-// descriptors that have been imported.
-type resolver struct {
- local descsByName
- remote Resolver
- imports importSet
-
- allowUnresolvable bool
-}
-
-func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) {
- for i, md := range mds {
- m := &ms[i]
- for j, fd := range md.GetField() {
- f := &m.L2.Fields.List[j]
- if f.L1.Cardinality == protoreflect.Required {
- m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number)
- }
- if fd.OneofIndex != nil {
- k := int(fd.GetOneofIndex())
- if !(0 <= k && k < len(md.GetOneofDecl())) {
- return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k)
- }
- o := &m.L2.Oneofs.List[k]
- f.L1.ContainingOneof = o
- o.L1.Fields.List = append(o.L1.Fields.List, f)
- }
-
- if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil {
- return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
- }
- if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) {
- // A map field might inherit delimited encoding from a file-wide default feature.
- // But maps never actually use delimited encoding. (At least for now...)
- f.L1.Kind = protoreflect.MessageKind
- }
- if fd.DefaultValue != nil {
- v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable)
- if err != nil {
- return errors.New("message field %q has invalid default: %v", f.FullName(), err)
- }
- f.L1.Default = filedesc.DefaultValue(v, ev)
- }
- }
-
- if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil {
- return err
- }
- if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) {
- for i, xd := range xds {
- x := &xs[i]
- if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil {
- return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err)
- }
- if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil {
- return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
- }
- if xd.DefaultValue != nil {
- v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable)
- if err != nil {
- return errors.New("extension field %q has invalid default: %v", x.FullName(), err)
- }
- x.L2.Default = filedesc.DefaultValue(v, ev)
- }
- }
- return nil
-}
-
-func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) {
- for i, sd := range sds {
- s := &ss[i]
- for j, md := range sd.GetMethod() {
- m := &s.L2.Methods.List[j]
- m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()))
- if err != nil {
- return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
- }
- m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()))
- if err != nil {
- return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
- }
- }
- }
- return nil
-}
-
-// findTarget finds an enum or message descriptor if k is an enum, message,
-// group, or unknown. If unknown and the name could be resolved, the returned
-// kind is set based on the type of the resolved descriptor.
-func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
- switch k {
- case protoreflect.EnumKind:
- ed, err := r.findEnumDescriptor(scope, ref)
- if err != nil {
- return 0, nil, nil, err
- }
- return k, ed, nil, nil
- case protoreflect.MessageKind, protoreflect.GroupKind:
- md, err := r.findMessageDescriptor(scope, ref)
- if err != nil {
- return 0, nil, nil, err
- }
- return k, nil, md, nil
- case 0:
- // Handle unspecified kinds (possible with parsers that operate
- // on a per-file basis without knowledge of dependencies).
- d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && r.allowUnresolvable {
- return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil
- } else if err == protoregistry.NotFound {
- return 0, nil, nil, errors.New("%q not found", ref.FullName())
- } else if err != nil {
- return 0, nil, nil, err
- }
- switch d := d.(type) {
- case protoreflect.EnumDescriptor:
- return protoreflect.EnumKind, d, nil, nil
- case protoreflect.MessageDescriptor:
- return protoreflect.MessageKind, nil, d, nil
- default:
- return 0, nil, nil, errors.New("unknown kind")
- }
- default:
- if ref != "" {
- return 0, nil, nil, errors.New("target name cannot be specified for %v", k)
- }
- if !k.IsValid() {
- return 0, nil, nil, errors.New("invalid kind: %d", k)
- }
- return k, nil, nil, nil
- }
-}
-
-// findDescriptor finds the descriptor by name,
-// which may be a relative name within some scope.
-//
-// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar",
-// then the following full names are searched:
-// - fizz.buzz.Foo.Bar
-// - fizz.Foo.Bar
-// - Foo.Bar
-func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) {
- if !ref.IsValid() {
- return nil, errors.New("invalid name reference: %q", ref)
- }
- if ref.IsFull() {
- scope, ref = "", ref[1:]
- }
- var foundButNotImported protoreflect.Descriptor
- for {
- // Derive the full name to search.
- s := protoreflect.FullName(ref)
- if scope != "" {
- s = scope + "." + s
- }
-
- // Check the current file for the descriptor.
- if d, ok := r.local[s]; ok {
- return d, nil
- }
-
- // Check the remote registry for the descriptor.
- d, err := r.remote.FindDescriptorByName(s)
- if err == nil {
- // Only allow descriptors covered by one of the imports.
- if r.imports[d.ParentFile().Path()] {
- return d, nil
- }
- foundButNotImported = d
- } else if err != protoregistry.NotFound {
- return nil, errors.Wrap(err, "%q", s)
- }
-
- // Continue on at a higher level of scoping.
- if scope == "" {
- if d := foundButNotImported; d != nil {
- return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path())
- }
- return nil, protoregistry.NotFound
- }
- scope = scope.Parent()
- }
-}
-
-func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) {
- d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && r.allowUnresolvable {
- return filedesc.PlaceholderEnum(ref.FullName()), nil
- } else if err == protoregistry.NotFound {
- return nil, errors.New("%q not found", ref.FullName())
- } else if err != nil {
- return nil, err
- }
- ed, ok := d.(protoreflect.EnumDescriptor)
- if !ok {
- return nil, errors.New("resolved %q, but it is not an enum", d.FullName())
- }
- return ed, nil
-}
-
-func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) {
- d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && r.allowUnresolvable {
- return filedesc.PlaceholderMessage(ref.FullName()), nil
- } else if err == protoregistry.NotFound {
- return nil, errors.New("%q not found", ref.FullName())
- } else if err != nil {
- return nil, err
- }
- md, ok := d.(protoreflect.MessageDescriptor)
- if !ok {
- return nil, errors.New("resolved %q, but it is not an message", d.FullName())
- }
- return md, nil
-}
-
-// partialName is a name reference that may be full or relative. A leading dot
-// means that the name is full; otherwise the name is relative to some current
-// scope.
-// See google.protobuf.FieldDescriptorProto.type_name.
-type partialName string
-
-func (s partialName) IsFull() bool {
- return len(s) > 0 && s[0] == '.'
-}
-
-func (s partialName) IsValid() bool {
- if s.IsFull() {
- return protoreflect.FullName(s[1:]).IsValid()
- }
- return protoreflect.FullName(s).IsValid()
-}
-
-const unknownPrefix = "*."
-
-// FullName converts the partial name to a full name on a best-effort basis.
-// If relative, it creates an invalid full name, using a "*." prefix
-// to indicate that the start of the full name is unknown.
-func (s partialName) FullName() protoreflect.FullName {
- if s.IsFull() {
- return protoreflect.FullName(s[1:])
- }
- return protoreflect.FullName(unknownPrefix + s)
-}
-
-func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) {
- var evs protoreflect.EnumValueDescriptors
- if fd.Enum() != nil {
- evs = fd.Enum().Values()
- }
- v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor)
- if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() {
- v = protoreflect.ValueOfEnum(0)
- if evs.Len() > 0 {
- v = protoreflect.ValueOfEnum(evs.Get(0).Number())
- }
- ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s)))
- } else if err != nil {
- return v, ev, err
- }
- if !fd.HasPresence() {
- return v, ev, errors.New("cannot be specified with implicit field presence")
- }
- if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated {
- return v, ev, errors.New("cannot be specified on composite types")
- }
- return v, ev, nil
-}
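
The scope walk in findDescriptor is easiest to see written out flat. A standalone sketch (candidates is a hypothetical helper) that lists the full names tried, in the same order as the loop above:

package main

import "strings"

// candidates returns the full names findDescriptor would try when resolving
// a relative reference, from the innermost scope outward.
func candidates(scope, ref string) []string {
	var out []string
	for scope != "" {
		out = append(out, scope+"."+ref)
		if i := strings.LastIndexByte(scope, '.'); i >= 0 {
			scope = scope[:i] // step out one level, like scope.Parent()
		} else {
			scope = ""
		}
	}
	return append(out, ref)
}

// candidates("fizz.buzz", "Foo.Bar") yields:
//   fizz.buzz.Foo.Bar
//   fizz.Foo.Bar
//   Foo.Bar
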
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
deleted file mode 100644
index c343d9227..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protodesc
-
-import (
- "strings"
- "unicode"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/filedesc"
- "google.golang.org/protobuf/internal/flags"
- "google.golang.org/protobuf/internal/genid"
- "google.golang.org/protobuf/internal/strs"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- "google.golang.org/protobuf/types/descriptorpb"
-)
-
-func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error {
- for i, ed := range eds {
- e := &es[i]
- if err := e.L2.ReservedNames.CheckValid(); err != nil {
- return errors.New("enum %q reserved names has %v", e.FullName(), err)
- }
- if err := e.L2.ReservedRanges.CheckValid(); err != nil {
- return errors.New("enum %q reserved ranges has %v", e.FullName(), err)
- }
- if len(ed.GetValue()) == 0 {
- return errors.New("enum %q must contain at least one value declaration", e.FullName())
- }
- allowAlias := ed.GetOptions().GetAllowAlias()
- foundAlias := false
- for i := 0; i < e.Values().Len(); i++ {
- v1 := e.Values().Get(i)
- if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 {
- foundAlias = true
- if !allowAlias {
- return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name())
- }
- }
- }
- if allowAlias && !foundAlias {
- return errors.New("enum %q allows aliases, but none were found", e.FullName())
- }
- if !e.IsClosed() {
- if v := e.Values().Get(0); v.Number() != 0 {
- return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName())
- }
- // Verify that value names in open enums do not conflict if the
- // case-insensitive prefix is removed.
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055
- names := map[string]protoreflect.EnumValueDescriptor{}
- prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1)
- for i := 0; i < e.Values().Len(); i++ {
- v1 := e.Values().Get(i)
- s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix))
- if v2, ok := names[s]; ok && v1.Number() != v2.Number() {
- return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
- }
- names[s] = v1
- }
- }
-
- for j, vd := range ed.GetValue() {
- v := &e.L2.Values.List[j]
- if vd.Number == nil {
- return errors.New("enum value %q must have a specified number", v.FullName())
- }
- if e.L2.ReservedNames.Has(v.Name()) {
- return errors.New("enum value %q must not use reserved name", v.FullName())
- }
- if e.L2.ReservedRanges.Has(v.Number()) {
- return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number())
- }
- }
- }
- return nil
-}
-
-func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
- // There are a few limited exceptions only for proto3
- isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3)
- for i, md := range mds {
- m := &ms[i]
-
- // Handle the message descriptor itself.
- isMessageSet := md.GetOptions().GetMessageSetWireFormat()
- if err := m.L2.ReservedNames.CheckValid(); err != nil {
- return errors.New("message %q reserved names has %v", m.FullName(), err)
- }
- if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil {
- return errors.New("message %q reserved ranges has %v", m.FullName(), err)
- }
- if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil {
- return errors.New("message %q extension ranges has %v", m.FullName(), err)
- }
- if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil {
- return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err)
- }
- for i := 0; i < m.Fields().Len(); i++ {
- f1 := m.Fields().Get(i)
- if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 {
- return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name())
- }
- }
- if isMessageSet && !flags.ProtoLegacy {
- return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName())
- }
- if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
- return errors.New("message %q is an invalid proto1 MessageSet", m.FullName())
- }
- if isProto3 {
- if m.ExtensionRanges().Len() > 0 {
- return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName())
- }
- }
-
- for j, fd := range md.GetField() {
- f := &m.L2.Fields.List[j]
- if m.L2.ReservedNames.Has(f.Name()) {
- return errors.New("message field %q must not use reserved name", f.FullName())
- }
- if !f.Number().IsValid() {
- return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number())
- }
- if !f.Cardinality().IsValid() {
- return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality())
- }
- if m.L2.ReservedRanges.Has(f.Number()) {
- return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number())
- }
- if m.L2.ExtensionRanges.Has(f.Number()) {
- return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number())
- }
- if fd.Extendee != nil {
- return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee())
- }
- if f.L1.IsProto3Optional {
- if !isProto3 {
- return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName())
- }
- if f.Cardinality() != protoreflect.Optional {
- return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName())
- }
- if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 {
- return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
- }
- }
- if f.IsPacked() && !isPackable(f) {
- return errors.New("message field %q is not packable", f.FullName())
- }
- if err := checkValidGroup(file, f); err != nil {
- return errors.New("message field %q is an invalid group: %v", f.FullName(), err)
- }
- if err := checkValidMap(f); err != nil {
- return errors.New("message field %q is an invalid map: %v", f.FullName(), err)
- }
- if isProto3 {
- if f.Cardinality() == protoreflect.Required {
- return errors.New("message field %q using proto3 semantics cannot be required", f.FullName())
- }
- if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
- return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName())
- }
- }
- if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
- return errors.New("message field %q with implicit presence may only use open enums", f.FullName())
- }
- }
- seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs
- for j := range md.GetOneofDecl() {
- o := &m.L2.Oneofs.List[j]
- if o.Fields().Len() == 0 {
- return errors.New("message oneof %q must contain at least one field declaration", o.FullName())
- }
- if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) {
- return errors.New("message oneof %q must have consecutively declared fields", o.FullName())
- }
-
- if o.IsSynthetic() {
- seenSynthetic = true
- continue
- }
- if !o.IsSynthetic() && seenSynthetic {
- return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName())
- }
-
- for i := 0; i < o.Fields().Len(); i++ {
- f := o.Fields().Get(i)
- if f.Cardinality() != protoreflect.Optional {
- return errors.New("message field %q belongs in a oneof and must be optional", f.FullName())
- }
- }
- }
-
- if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil {
- return err
- }
- if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil {
- return err
- }
- if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil {
- return err
- }
- }
- return nil
-}
-
-func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
- for i, xd := range xds {
- x := &xs[i]
- // NOTE: Avoid using the IsValid method since extensions to MessageSet
- // may have a field number higher than normal. This check only verifies
- // that the number is not negative or reserved. We check again later
- // if we know that the extendee is definitely not a MessageSet.
- if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) {
- return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
- }
- if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required {
- return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality())
- }
- if xd.JsonName != nil {
- // A bug in older versions of protoc would always populate the
- // "json_name" option for extensions when it is meaningless.
- // When it did so, it would always use the camel-cased field name.
- if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) {
- return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName())
- }
- }
- if xd.OneofIndex != nil {
- return errors.New("extension field %q may not be part of a oneof", x.FullName())
- }
- if md := x.ContainingMessage(); !md.IsPlaceholder() {
- if !md.ExtensionRanges().Has(x.Number()) {
- return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number())
- }
- isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat()
- if isMessageSet && !isOptionalMessage(x) {
- return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName())
- }
- if !isMessageSet && !x.Number().IsValid() {
- return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
- }
- }
- if x.IsPacked() && !isPackable(x) {
- return errors.New("extension field %q is not packable", x.FullName())
- }
- if err := checkValidGroup(f, x); err != nil {
- return errors.New("extension field %q is an invalid group: %v", x.FullName(), err)
- }
- if md := x.Message(); md != nil && md.IsMapEntry() {
- return errors.New("extension field %q cannot be a map entry", x.FullName())
- }
- if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) {
- switch x.ContainingMessage().FullName() {
- case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName():
- case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName():
- default:
- return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName())
- }
- }
- }
- return nil
-}
-
-// isOptionalMessage reports whether this is an optional message.
-// If the kind is unknown, it is assumed to be a message.
-func isOptionalMessage(fd protoreflect.FieldDescriptor) bool {
- return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional
-}
-
-// isPackable checks whether the pack option can be specified.
-func isPackable(fd protoreflect.FieldDescriptor) bool {
- switch fd.Kind() {
- case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
- return false
- }
- return fd.IsList()
-}
-
-// checkValidGroup checks whether fd is a valid group according to the same
-// rules that protoc imposes, returning an error if it is not.
-func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error {
- md := fd.Message()
- switch {
- case fd.Kind() != protoreflect.GroupKind:
- return nil
- case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3):
- return errors.New("invalid under proto3 semantics")
- case md == nil || md.IsPlaceholder():
- return errors.New("message must be resolvable")
- }
- if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) {
- switch {
- case fd.FullName().Parent() != md.FullName().Parent():
- return errors.New("message and field must be declared in the same scope")
- case !unicode.IsUpper(rune(md.Name()[0])):
- return errors.New("message name must start with an uppercase")
- case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
- return errors.New("field name must be lowercased form of the message name")
- }
- }
- return nil
-}
-
-// checkValidMap checks whether the field is a valid map according to the same
-// rules that protoc imposes.
-// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115
-func checkValidMap(fd protoreflect.FieldDescriptor) error {
- md := fd.Message()
- switch {
- case md == nil || !md.IsMapEntry():
- return nil
- case fd.FullName().Parent() != md.FullName().Parent():
- return errors.New("message and field must be declared in the same scope")
- case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))):
- return errors.New("incorrect implicit map entry name")
- case fd.Cardinality() != protoreflect.Repeated:
- return errors.New("field must be repeated")
- case md.Fields().Len() != 2:
- return errors.New("message must have exactly two fields")
- case md.ExtensionRanges().Len() > 0:
- return errors.New("message must not have any extension ranges")
- case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0:
- return errors.New("message must not have any nested declarations")
- }
- kf := md.Fields().Get(0)
- vf := md.Fields().Get(1)
- switch {
- case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault():
- return errors.New("invalid key field")
- case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault():
- return errors.New("invalid value field")
- }
- switch kf.Kind() {
- case protoreflect.BoolKind: // bool
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64
- case protoreflect.StringKind: // string
- default:
- return errors.New("invalid key kind: %v", kf.Kind())
- }
- if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 {
- return errors.New("map enum value must have zero number for the first value")
- }
- return nil
-}
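For orientation, the map-entry name check above relies on the implicit naming convention protoc applies to map fields. A minimal sketch of that derivation, assuming ASCII field names (the real logic is strs.MapEntryName in an internal package; mapEntryName below is an illustrative stand-in):

    package main

    import "fmt"

    // mapEntryName sketches the implicit map entry message name: drop
    // underscores, upper-case the letter that follows each one (and the
    // first letter), then append "Entry".
    func mapEntryName(field string) string {
        var out []byte
        upper := true
        for i := 0; i < len(field); i++ {
            c := field[i]
            if c == '_' {
                upper = true
                continue
            }
            if upper && 'a' <= c && c <= 'z' {
                c -= 'a' - 'A'
            }
            upper = false
            out = append(out, c)
        }
        return string(out) + "Entry"
    }

    func main() {
        // A field `map<string, int32> string_map = 1;` gets an implicit
        // nested message named StringMapEntry.
        fmt.Println(mapEntryName("string_map")) // StringMapEntry
    }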
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
deleted file mode 100644
index 697a61b29..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protodesc
-
-import (
- "fmt"
- "os"
- "sync"
-
- "google.golang.org/protobuf/internal/editiondefaults"
- "google.golang.org/protobuf/internal/filedesc"
- "google.golang.org/protobuf/internal/genid"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/types/descriptorpb"
- "google.golang.org/protobuf/types/gofeaturespb"
-)
-
-var defaults = &descriptorpb.FeatureSetDefaults{}
-var defaultsCacheMu sync.Mutex
-var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet)
-
-func init() {
- err := proto.Unmarshal(editiondefaults.Defaults, defaults)
- if err != nil {
- fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err)
- os.Exit(1)
- }
-}
-
-func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition {
- return filedesc.Edition(epb)
-}
-
-func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
- switch ed {
- case filedesc.EditionUnknown:
- return descriptorpb.Edition_EDITION_UNKNOWN
- case filedesc.EditionProto2:
- return descriptorpb.Edition_EDITION_PROTO2
- case filedesc.EditionProto3:
- return descriptorpb.Edition_EDITION_PROTO3
- case filedesc.Edition2023:
- return descriptorpb.Edition_EDITION_2023
- case filedesc.Edition2024:
- return descriptorpb.Edition_EDITION_2024
- default:
- panic(fmt.Sprintf("unknown value for edition: %v", ed))
- }
-}
-
-func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
- defaultsCacheMu.Lock()
- defer defaultsCacheMu.Unlock()
- if def, ok := defaultsCache[ed]; ok {
- return def
- }
- edpb := toEditionProto(ed)
- if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
- // This should never happen: protodesc.(FileOptions).New would fail when
- // initializing the file descriptor.
- // This most likely means the embedded defaults were not updated.
- fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
- os.Exit(1)
- }
- fsed := defaults.GetDefaults()[0]
- // Using a linear search for now.
- // Editions are guaranteed to be sorted and thus we could use a binary search.
- // Given that there are only a handful of editions (with one more per year),
- // there is not much reason to use a binary search.
- for _, def := range defaults.GetDefaults() {
- if def.GetEdition() <= edpb {
- fsed = def
- } else {
- break
- }
- }
- fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet)
- proto.Merge(fs, fsed.GetOverridableFeatures())
- defaultsCache[ed] = fs
- return fs
-}
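A minimal sketch of the lookup loop above, stripped of the proto types: among defaults sorted by edition, keep the newest entry whose edition does not exceed the target (pickDefaults and its arguments are illustrative names, not part of the vendored API):

    package main

    import "fmt"

    func pickDefaults(sortedEditions []int, target int) int {
        best := sortedEditions[0] // assumed non-empty and sorted ascending
        for _, e := range sortedEditions {
            if e <= target {
                best = e // newest edition not newer than the target
            } else {
                break
            }
        }
        return best
    }

    func main() {
        // A target that falls between two defaults resolves to the older one.
        fmt.Println(pickDefaults([]int{1, 5, 9}, 7)) // 5
    }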
-
-// mergeEditionFeatures merges the parent and child feature sets. This function
- // should be used when initializing Go descriptors from descriptor protos, which
- // is why the parent is a filedesc.EditionFeatures (Go representation) while
- // the child is a descriptorpb.FeatureSet (protoc representation).
-// Any feature set by the child overwrites what is set by the parent.
-func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures {
- var parentFS filedesc.EditionFeatures
- switch p := parentDesc.(type) {
- case *filedesc.File:
- parentFS = p.L1.EditionFeatures
- case *filedesc.Message:
- parentFS = p.L1.EditionFeatures
- default:
- panic(fmt.Sprintf("unknown parent type %T", parentDesc))
- }
- if child == nil {
- return parentFS
- }
- if fp := child.FieldPresence; fp != nil {
- parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
- *fp == descriptorpb.FeatureSet_EXPLICIT
- parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED
- }
- if et := child.EnumType; et != nil {
- parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN
- }
-
- if rfe := child.RepeatedFieldEncoding; rfe != nil {
- parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED
- }
-
- if utf8val := child.Utf8Validation; utf8val != nil {
- parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY
- }
-
- if me := child.MessageEncoding; me != nil {
- parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED
- }
-
- if jf := child.JsonFormat; jf != nil {
- parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW
- }
-
- // We must not use proto.GetExtension(child, gofeaturespb.E_Go)
- // because that only works for messages we generated, but not for
- // dynamicpb messages. See golang/protobuf#1669.
- //
- // Further, we harden this code against adversarial inputs: a
- // service which accepts descriptors from a possibly malicious
- // source shouldn't crash.
- goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
- if !goFeatures.IsValid() {
- return parentFS
- }
- gf, ok := goFeatures.Interface().(protoreflect.Message)
- if !ok {
- return parentFS
- }
- // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures.
- fields := gf.Descriptor().Fields()
-
- if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil &&
- !fd.IsList() &&
- fd.Kind() == protoreflect.BoolKind &&
- gf.Has(fd) {
- parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool()
- }
-
- if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil &&
- !fd.IsList() &&
- fd.Kind() == protoreflect.EnumKind &&
- gf.Has(fd) {
- parentFS.StripEnumPrefix = int(gf.Get(fd).Enum())
- }
-
- if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil &&
- !fd.IsList() &&
- fd.Kind() == protoreflect.EnumKind &&
- gf.Has(fd) {
- parentFS.APILevel = int(gf.Get(fd).Enum())
- }
-
- return parentFS
-}
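The defensive reads above generalize beyond the Go features extension. A hedged sketch of the same pattern using only the public protoreflect API (boolFieldByNumber is an illustrative helper, not vendored code):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    // boolFieldByNumber looks a field up by number on an arbitrary message
    // and validates its shape before trusting the value, so adversarial or
    // dynamic descriptors cannot cause a panic.
    func boolFieldByNumber(m protoreflect.Message, num protoreflect.FieldNumber) (bool, bool) {
        fd := m.Descriptor().Fields().ByNumber(num)
        if fd == nil || fd.IsList() || fd.Kind() != protoreflect.BoolKind || !m.Has(fd) {
            return false, false
        }
        return m.Get(fd).Bool(), true
    }

    func main() {
        m := durationpb.New(0).ProtoReflect()
        _, ok := boolFieldByNumber(m, 1) // field 1 of Duration is an int64, not a bool
        fmt.Println(ok)                  // false
    }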
-
-// initFileDescFromFeatureSet initializes editions related fields in fd based
- // on fs. If fs is nil, it is assumed to be an empty feature set and all fields
-// will be initialized with the appropriate default. fd.L1.Edition must be set
-// before calling this function.
-func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) {
- dfs := getFeatureSetFor(fd.L1.Edition)
- // initialize the featureset with the defaults
- fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs)
- // overwrite any options explicitly specified
- fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs)
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
deleted file mode 100644
index 9b880aa8c..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protodesc
-
-import (
- "fmt"
- "strings"
-
- "google.golang.org/protobuf/internal/encoding/defval"
- "google.golang.org/protobuf/internal/strs"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- "google.golang.org/protobuf/types/descriptorpb"
-)
-
-// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a
-// google.protobuf.FileDescriptorProto message.
-func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
- p := &descriptorpb.FileDescriptorProto{
- Name: proto.String(file.Path()),
- Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions),
- }
- if file.Package() != "" {
- p.Package = proto.String(string(file.Package()))
- }
- for i, imports := 0, file.Imports(); i < imports.Len(); i++ {
- imp := imports.Get(i)
- p.Dependency = append(p.Dependency, imp.Path())
- if imp.IsPublic {
- p.PublicDependency = append(p.PublicDependency, int32(i))
- }
- }
- for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ {
- loc := locs.Get(i)
- l := &descriptorpb.SourceCodeInfo_Location{}
- l.Path = append(l.Path, loc.Path...)
- if loc.StartLine == loc.EndLine {
- l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)}
- } else {
- l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)}
- }
- l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...)
- if loc.LeadingComments != "" {
- l.LeadingComments = proto.String(loc.LeadingComments)
- }
- if loc.TrailingComments != "" {
- l.TrailingComments = proto.String(loc.TrailingComments)
- }
- if p.SourceCodeInfo == nil {
- p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{}
- }
- p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l)
-
- }
- for i, messages := 0, file.Messages(); i < messages.Len(); i++ {
- p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i)))
- }
- for i, enums := 0, file.Enums(); i < enums.Len(); i++ {
- p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))
- }
- for i, services := 0, file.Services(); i < services.Len(); i++ {
- p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i)))
- }
- for i, exts := 0, file.Extensions(); i < exts.Len(); i++ {
- p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
- }
- if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
- p.Syntax = proto.String(file.Syntax().String())
- }
- if file.Syntax() == protoreflect.Editions {
- desc := file
- if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
- desc = fileImportDesc.FileDescriptor
- }
-
- if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok {
- p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum()
- }
- }
- return p
-}
-
-// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a
-// google.protobuf.DescriptorProto message.
-func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
- p := &descriptorpb.DescriptorProto{
- Name: proto.String(string(message.Name())),
- Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions),
- }
- for i, fields := 0, message.Fields(); i < fields.Len(); i++ {
- p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i)))
- }
- for i, exts := 0, message.Extensions(); i < exts.Len(); i++ {
- p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
- }
- for i, messages := 0, message.Messages(); i < messages.Len(); i++ {
- p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i)))
- }
- for i, enums := 0, message.Enums(); i < enums.Len(); i++ {
- p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))
- }
- for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ {
- xrange := xranges.Get(i)
- p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{
- Start: proto.Int32(int32(xrange[0])),
- End: proto.Int32(int32(xrange[1])),
- Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions),
- })
- }
- for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ {
- p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i)))
- }
- for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ {
- rrange := ranges.Get(i)
- p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{
- Start: proto.Int32(int32(rrange[0])),
- End: proto.Int32(int32(rrange[1])),
- })
- }
- for i, names := 0, message.ReservedNames(); i < names.Len(); i++ {
- p.ReservedName = append(p.ReservedName, string(names.Get(i)))
- }
- return p
-}
-
-// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a
-// google.protobuf.FieldDescriptorProto message.
-func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
- p := &descriptorpb.FieldDescriptorProto{
- Name: proto.String(string(field.Name())),
- Number: proto.Int32(int32(field.Number())),
- Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(),
- Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions),
- }
- if field.IsExtension() {
- p.Extendee = fullNameOf(field.ContainingMessage())
- }
- if field.Kind().IsValid() {
- p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum()
- }
- if field.Enum() != nil {
- p.TypeName = fullNameOf(field.Enum())
- }
- if field.Message() != nil {
- p.TypeName = fullNameOf(field.Message())
- }
- if field.HasJSONName() {
- // A bug in older versions of protoc would always populate the
- // "json_name" option for extensions when it is meaningless.
- // When it did so, it would always use the camel-cased field name.
- if field.IsExtension() {
- p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name())))
- } else {
- p.JsonName = proto.String(field.JSONName())
- }
- }
- if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() {
- p.Proto3Optional = proto.Bool(true)
- }
- if field.Syntax() == protoreflect.Editions {
- // Editions have no group keyword; this type is only set so that downstream users continue
- // treating this as delimited encoding.
- if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
- p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
- }
- // Editions have no required keyword; this label is only set so that downstream users continue
- // treating it as required.
- if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
- p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
- }
- }
- if field.HasDefault() {
- def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor)
- if err != nil && field.DefaultEnumValue() != nil {
- def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values
- } else if err != nil {
- panic(fmt.Sprintf("%v: %v", field.FullName(), err))
- }
- p.DefaultValue = proto.String(def)
- }
- if oneof := field.ContainingOneof(); oneof != nil {
- p.OneofIndex = proto.Int32(int32(oneof.Index()))
- }
- return p
-}
-
-// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a
-// google.protobuf.OneofDescriptorProto message.
-func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
- return &descriptorpb.OneofDescriptorProto{
- Name: proto.String(string(oneof.Name())),
- Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions),
- }
-}
-
-// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a
-// google.protobuf.EnumDescriptorProto message.
-func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
- p := &descriptorpb.EnumDescriptorProto{
- Name: proto.String(string(enum.Name())),
- Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions),
- }
- for i, values := 0, enum.Values(); i < values.Len(); i++ {
- p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i)))
- }
- for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ {
- rrange := ranges.Get(i)
- p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{
- Start: proto.Int32(int32(rrange[0])),
- End: proto.Int32(int32(rrange[1])),
- })
- }
- for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ {
- p.ReservedName = append(p.ReservedName, string(names.Get(i)))
- }
- return p
-}
-
-// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a
-// google.protobuf.EnumValueDescriptorProto message.
-func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
- return &descriptorpb.EnumValueDescriptorProto{
- Name: proto.String(string(value.Name())),
- Number: proto.Int32(int32(value.Number())),
- Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions),
- }
-}
-
-// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a
-// google.protobuf.ServiceDescriptorProto message.
-func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
- p := &descriptorpb.ServiceDescriptorProto{
- Name: proto.String(string(service.Name())),
- Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions),
- }
- for i, methods := 0, service.Methods(); i < methods.Len(); i++ {
- p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i)))
- }
- return p
-}
-
-// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a
-// google.protobuf.MethodDescriptorProto message.
-func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
- p := &descriptorpb.MethodDescriptorProto{
- Name: proto.String(string(method.Name())),
- InputType: fullNameOf(method.Input()),
- OutputType: fullNameOf(method.Output()),
- Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions),
- }
- if method.IsStreamingClient() {
- p.ClientStreaming = proto.Bool(true)
- }
- if method.IsStreamingServer() {
- p.ServerStreaming = proto.Bool(true)
- }
- return p
-}
-
-func fullNameOf(d protoreflect.Descriptor) *string {
- if d == nil {
- return nil
- }
- if strings.HasPrefix(string(d.FullName()), unknownPrefix) {
- return proto.String(string(d.FullName()[len(unknownPrefix):]))
- }
- return proto.String("." + string(d.FullName()))
-}
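For context, a typical use of this deleted entry point round-trips a linked-in descriptor into its wire-level proto form; durationpb is used below purely as a convenient message whose file descriptor is registered:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protodesc"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()
        fdp := protodesc.ToFileDescriptorProto(fd)
        fmt.Println(fdp.GetName()) // google/protobuf/duration.proto
    }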
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
deleted file mode 100644
index 37e712b6b..000000000
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2023 Google Inc. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/go_features.proto
-
-package gofeaturespb
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- descriptorpb "google.golang.org/protobuf/types/descriptorpb"
- reflect "reflect"
- sync "sync"
- unsafe "unsafe"
-)
-
-type GoFeatures_APILevel int32
-
-const (
- // API_LEVEL_UNSPECIFIED results in selecting the OPEN API,
- // but needs to be a separate value to distinguish between
- // an explicitly set api level or a missing api level.
- GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0
- GoFeatures_API_OPEN GoFeatures_APILevel = 1
- GoFeatures_API_HYBRID GoFeatures_APILevel = 2
- GoFeatures_API_OPAQUE GoFeatures_APILevel = 3
-)
-
-// Enum value maps for GoFeatures_APILevel.
-var (
- GoFeatures_APILevel_name = map[int32]string{
- 0: "API_LEVEL_UNSPECIFIED",
- 1: "API_OPEN",
- 2: "API_HYBRID",
- 3: "API_OPAQUE",
- }
- GoFeatures_APILevel_value = map[string]int32{
- "API_LEVEL_UNSPECIFIED": 0,
- "API_OPEN": 1,
- "API_HYBRID": 2,
- "API_OPAQUE": 3,
- }
-)
-
-func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel {
- p := new(GoFeatures_APILevel)
- *p = x
- return p
-}
-
-func (x GoFeatures_APILevel) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor()
-}
-
-func (GoFeatures_APILevel) Type() protoreflect.EnumType {
- return &file_google_protobuf_go_features_proto_enumTypes[0]
-}
-
-func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = GoFeatures_APILevel(num)
- return nil
-}
-
-// Deprecated: Use GoFeatures_APILevel.Descriptor instead.
-func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0}
-}
-
-type GoFeatures_StripEnumPrefix int32
-
-const (
- GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0
- GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1
- GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2
- GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3
-)
-
-// Enum value maps for GoFeatures_StripEnumPrefix.
-var (
- GoFeatures_StripEnumPrefix_name = map[int32]string{
- 0: "STRIP_ENUM_PREFIX_UNSPECIFIED",
- 1: "STRIP_ENUM_PREFIX_KEEP",
- 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH",
- 3: "STRIP_ENUM_PREFIX_STRIP",
- }
- GoFeatures_StripEnumPrefix_value = map[string]int32{
- "STRIP_ENUM_PREFIX_UNSPECIFIED": 0,
- "STRIP_ENUM_PREFIX_KEEP": 1,
- "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2,
- "STRIP_ENUM_PREFIX_STRIP": 3,
- }
-)
-
-func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix {
- p := new(GoFeatures_StripEnumPrefix)
- *p = x
- return p
-}
-
-func (x GoFeatures_StripEnumPrefix) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor()
-}
-
-func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType {
- return &file_google_protobuf_go_features_proto_enumTypes[1]
-}
-
-func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = GoFeatures_StripEnumPrefix(num)
- return nil
-}
-
-// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead.
-func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1}
-}
-
-type GoFeatures struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Whether or not to generate the deprecated UnmarshalJSON method for enums.
- // Can only be true for protos using the Open Struct API.
- LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
- // One of OPEN, HYBRID or OPAQUE.
- ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"`
- StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *GoFeatures) Reset() {
- *x = GoFeatures{}
- mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *GoFeatures) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GoFeatures) ProtoMessage() {}
-
-func (x *GoFeatures) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead.
-func (*GoFeatures) Descriptor() ([]byte, []int) {
- return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
- if x != nil && x.LegacyUnmarshalJsonEnum != nil {
- return *x.LegacyUnmarshalJsonEnum
- }
- return false
-}
-
-func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel {
- if x != nil && x.ApiLevel != nil {
- return *x.ApiLevel
- }
- return GoFeatures_API_LEVEL_UNSPECIFIED
-}
-
-func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix {
- if x != nil && x.StripEnumPrefix != nil {
- return *x.StripEnumPrefix
- }
- return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED
-}
-
-var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
- {
- ExtendedType: (*descriptorpb.FeatureSet)(nil),
- ExtensionType: (*GoFeatures)(nil),
- Field: 1002,
- Name: "pb.go",
- Tag: "bytes,1002,opt,name=go",
- Filename: "google/protobuf/go_features.proto",
- },
-}
-
-// Extension fields to descriptorpb.FeatureSet.
-var (
- // optional pb.GoFeatures go = 1002;
- E_Go = &file_google_protobuf_go_features_proto_extTypes[0]
-)
-
-var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
-
-const file_google_protobuf_go_features_proto_rawDesc = "" +
- "\n" +
- "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" +
- "\n" +
- "GoFeatures\x12\xbe\x01\n" +
- "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" +
- "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" +
- "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" +
- "API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" +
- "\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" +
- "\bAPILevel\x12\x19\n" +
- "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" +
- "\bAPI_OPEN\x10\x01\x12\x0e\n" +
- "\n" +
- "API_HYBRID\x10\x02\x12\x0e\n" +
- "\n" +
- "API_OPAQUE\x10\x03\"\x92\x01\n" +
- "\x0fStripEnumPrefix\x12!\n" +
- "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" +
- "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" +
- "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" +
- "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" +
- "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb"
-
-var (
- file_google_protobuf_go_features_proto_rawDescOnce sync.Once
- file_google_protobuf_go_features_proto_rawDescData []byte
-)
-
-func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
- file_google_protobuf_go_features_proto_rawDescOnce.Do(func() {
- file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)))
- })
- return file_google_protobuf_go_features_proto_rawDescData
-}
-
-var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_go_features_proto_goTypes = []any{
- (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel
- (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix
- (*GoFeatures)(nil), // 2: pb.GoFeatures
- (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet
-}
-var file_google_protobuf_go_features_proto_depIdxs = []int32{
- 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel
- 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix
- 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet
- 2, // 3: pb.go:type_name -> pb.GoFeatures
- 4, // [4:4] is the sub-list for method output_type
- 4, // [4:4] is the sub-list for method input_type
- 3, // [3:4] is the sub-list for extension type_name
- 2, // [2:3] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
-}
-
-func init() { file_google_protobuf_go_features_proto_init() }
-func file_google_protobuf_go_features_proto_init() {
- if File_google_protobuf_go_features_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)),
- NumEnums: 2,
- NumMessages: 1,
- NumExtensions: 1,
- NumServices: 0,
- },
- GoTypes: file_google_protobuf_go_features_proto_goTypes,
- DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
- EnumInfos: file_google_protobuf_go_features_proto_enumTypes,
- MessageInfos: file_google_protobuf_go_features_proto_msgTypes,
- ExtensionInfos: file_google_protobuf_go_features_proto_extTypes,
- }.Build()
- File_google_protobuf_go_features_proto = out.File
- file_google_protobuf_go_features_proto_goTypes = nil
- file_google_protobuf_go_features_proto_depIdxs = nil
-}
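A short usage sketch for the extension this file defines. With generated types like these, proto.SetExtension and proto.GetExtension work directly (the dynamicpb caveat noted in editions.go only concerns non-generated messages):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/descriptorpb"
        "google.golang.org/protobuf/types/gofeaturespb"
    )

    func main() {
        fs := &descriptorpb.FeatureSet{}
        proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
            ApiLevel: gofeaturespb.GoFeatures_API_OPAQUE.Enum(),
        })
        gf := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
        fmt.Println(gf.GetApiLevel()) // API_OPAQUE
    }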
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
deleted file mode 100644
index ca2e7b38f..000000000
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ /dev/null
@@ -1,346 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/duration.proto
-
-// Package durationpb contains generated types for google/protobuf/duration.proto.
-//
-// The Duration message represents a signed span of time.
-//
-// # Conversion to a Go Duration
-//
-// The AsDuration method can be used to convert a Duration message to a
-// standard Go time.Duration value:
-//
-// d := dur.AsDuration()
-// ... // make use of d as a time.Duration
-//
-// Converting to a time.Duration is a common operation so that the extensive
-// set of time-based operations provided by the time package can be leveraged.
-// See https://golang.org/pkg/time for more information.
-//
-// The AsDuration method performs the conversion on a best-effort basis.
- // Durations with denormal values (e.g., nanoseconds outside the range
- // -999,999,999 to +999,999,999, or seconds and nanoseconds with opposite signs)
-// are normalized during the conversion to a time.Duration. To manually check for
- // an invalid Duration per the documented limitations in duration.proto,
-// additionally call the CheckValid method:
-//
-// if err := dur.CheckValid(); err != nil {
-// ... // handle error
-// }
-//
- // Note that the documented limitations in duration.proto do not protect a
-// Duration from overflowing the representable range of a time.Duration in Go.
-// The AsDuration method uses saturation arithmetic such that an overflow clamps
-// the resulting value to the closest representable value (e.g., math.MaxInt64
-// for positive overflow and math.MinInt64 for negative overflow).
-//
-// # Conversion from a Go Duration
-//
-// The durationpb.New function can be used to construct a Duration message
-// from a standard Go time.Duration value:
-//
-// dur := durationpb.New(d)
-// ... // make use of d as a *durationpb.Duration
-package durationpb
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- math "math"
- reflect "reflect"
- sync "sync"
- time "time"
- unsafe "unsafe"
-)
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (duration.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-type Duration struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// New constructs a new Duration from the provided time.Duration.
-func New(d time.Duration) *Duration {
- nanos := d.Nanoseconds()
- secs := nanos / 1e9
- nanos -= secs * 1e9
- return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}
-}
-
-// AsDuration converts x to a time.Duration,
-// returning the closest duration value in the event of overflow.
-func (x *Duration) AsDuration() time.Duration {
- secs := x.GetSeconds()
- nanos := x.GetNanos()
- d := time.Duration(secs) * time.Second
- overflow := d/time.Second != time.Duration(secs)
- d += time.Duration(nanos) * time.Nanosecond
- overflow = overflow || (secs < 0 && nanos < 0 && d > 0)
- overflow = overflow || (secs > 0 && nanos > 0 && d < 0)
- if overflow {
- switch {
- case secs < 0:
- return time.Duration(math.MinInt64)
- case secs > 0:
- return time.Duration(math.MaxInt64)
- }
- }
- return d
-}
-
-// IsValid reports whether the duration is valid.
-// It is equivalent to CheckValid == nil.
-func (x *Duration) IsValid() bool {
- return x.check() == 0
-}
-
-// CheckValid returns an error if the duration is invalid.
-// In particular, it checks whether the value is within the range of
-// -10000 years to +10000 years inclusive.
-// An error is reported for a nil Duration.
-func (x *Duration) CheckValid() error {
- switch x.check() {
- case invalidNil:
- return protoimpl.X.NewError("invalid nil Duration")
- case invalidUnderflow:
- return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x)
- case invalidOverflow:
- return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x)
- case invalidNanosRange:
- return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x)
- case invalidNanosSign:
- return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x)
- default:
- return nil
- }
-}
-
-const (
- _ = iota
- invalidNil
- invalidUnderflow
- invalidOverflow
- invalidNanosRange
- invalidNanosSign
-)
-
-func (x *Duration) check() uint {
- const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min
- secs := x.GetSeconds()
- nanos := x.GetNanos()
- switch {
- case x == nil:
- return invalidNil
- case secs < -absDuration:
- return invalidUnderflow
- case secs > +absDuration:
- return invalidOverflow
- case nanos <= -1e9 || nanos >= +1e9:
- return invalidNanosRange
- case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):
- return invalidNanosSign
- default:
- return 0
- }
-}
-
-func (x *Duration) Reset() {
- *x = Duration{}
- mi := &file_google_protobuf_duration_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Duration) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Duration) ProtoMessage() {}
-
-func (x *Duration) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_duration_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Duration.ProtoReflect.Descriptor instead.
-func (*Duration) Descriptor() ([]byte, []int) {
- return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Duration) GetSeconds() int64 {
- if x != nil {
- return x.Seconds
- }
- return 0
-}
-
-func (x *Duration) GetNanos() int32 {
- if x != nil {
- return x.Nanos
- }
- return 0
-}
-
-var File_google_protobuf_duration_proto protoreflect.FileDescriptor
-
-const file_google_protobuf_duration_proto_rawDesc = "" +
- "\n" +
- "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" +
- "\bDuration\x12\x18\n" +
- "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
- "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" +
- "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
-
-var (
- file_google_protobuf_duration_proto_rawDescOnce sync.Once
- file_google_protobuf_duration_proto_rawDescData []byte
-)
-
-func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
- file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
- file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)))
- })
- return file_google_protobuf_duration_proto_rawDescData
-}
-
-var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_duration_proto_goTypes = []any{
- (*Duration)(nil), // 0: google.protobuf.Duration
-}
-var file_google_protobuf_duration_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_google_protobuf_duration_proto_init() }
-func file_google_protobuf_duration_proto_init() {
- if File_google_protobuf_duration_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)),
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_protobuf_duration_proto_goTypes,
- DependencyIndexes: file_google_protobuf_duration_proto_depIdxs,
- MessageInfos: file_google_protobuf_duration_proto_msgTypes,
- }.Build()
- File_google_protobuf_duration_proto = out.File
- file_google_protobuf_duration_proto_goTypes = nil
- file_google_protobuf_duration_proto_depIdxs = nil
-}
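A brief usage sketch tying New, CheckValid, and AsDuration together:

    package main

    import (
        "fmt"
        "log"
        "time"

        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        d := durationpb.New(90 * time.Second) // Seconds: 90, Nanos: 0
        if err := d.CheckValid(); err != nil {
            log.Fatal(err)
        }
        fmt.Println(d.AsDuration()) // 1m30s
    }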
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
deleted file mode 100644
index 91ee89a5c..000000000
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/field_mask.proto
-
-// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
-//
-// The FieldMask message represents a set of symbolic field paths.
-// The paths are specific to some target message type,
-// which is not stored within the FieldMask message itself.
-//
-// # Constructing a FieldMask
-//
- // The New function is used to construct a FieldMask:
-//
-// var messageType *descriptorpb.DescriptorProto
-// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
-// if err != nil {
-// ... // handle error
-// }
-// ... // make use of fm
-//
-// The "field.name" and "field.number" paths are valid paths according to the
- // google.protobuf.DescriptorProto message. Use of a path that does not correspond
- // to valid fields reachable from DescriptorProto results in an error.
-//
-// Once a FieldMask message has been constructed,
- // the Append method can be used to insert additional paths into the path set:
-//
-// var messageType *descriptorpb.DescriptorProto
-// if err := fm.Append(messageType, "options"); err != nil {
-// ... // handle error
-// }
-//
-// # Type checking a FieldMask
-//
-// In order to verify that a FieldMask represents a set of fields that are
-// reachable from some target message type, use the IsValid method:
-//
-// var messageType *descriptorpb.DescriptorProto
-// if fm.IsValid(messageType) {
-// ... // make use of fm
-// }
-//
-// IsValid needs to be passed the target message type as an input since the
-// FieldMask message itself does not store the message type that the set of paths
- // is for.
-package fieldmaskpb
-
-import (
- proto "google.golang.org/protobuf/proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sort "sort"
- strings "strings"
- sync "sync"
- unsafe "unsafe"
-)
-
-// `FieldMask` represents a set of symbolic field paths, for example:
-//
-// paths: "f.a"
-// paths: "f.b.d"
-//
-// Here `f` represents a field in some root message, `a` and `b`
-// fields in the message found in `f`, and `d` a field found in the
-// message in `f.b`.
-//
-// Field masks are used to specify a subset of fields that should be
-// returned by a get operation or modified by an update operation.
-// Field masks also have a custom JSON encoding (see below).
-//
-// # Field Masks in Projections
-//
-// When used in the context of a projection, a response message or
-// sub-message is filtered by the API to only contain those fields as
-// specified in the mask. For example, if the mask in the previous
-// example is applied to a response message as follows:
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// x : 2
-// }
-// y : 13
-// }
-// z: 8
-//
- // The result will not contain specific values for fields x, y, and z
-// (their value will be set to the default, and omitted in proto text
-// output):
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// }
-// }
-//
-// A repeated field is not allowed except at the last position of a
-// paths string.
-//
-// If a FieldMask object is not present in a get operation, the
-// operation applies to all fields (as if a FieldMask of all fields
-// had been specified).
-//
-// Note that a field mask does not necessarily apply to the
-// top-level response message. In case of a REST get operation, the
-// field mask applies directly to the response, but in case of a REST
-// list operation, the mask instead applies to each individual message
-// in the returned resource list. In case of a REST custom method,
-// other definitions may be used. Where the mask applies will be
-// clearly documented together with its declaration in the API. In
-// any case, the effect on the returned resource/resources is required
-// behavior for APIs.
-//
-// # Field Masks in Update Operations
-//
-// A field mask in update operations specifies which fields of the
-// targeted resource are going to be updated. The API is required
-// to only change the values of the fields as specified in the mask
-// and leave the others untouched. If a resource is passed in to
-// describe the updated values, the API ignores the values of all
-// fields not covered by the mask.
-//
-// If a repeated field is specified for an update operation, new values will
-// be appended to the existing repeated field in the target resource. Note that
-// a repeated field is only allowed in the last position of a `paths` string.
-//
-// If a sub-message is specified in the last position of the field mask for an
- // update operation, then the new value will be merged into the existing sub-message
-// in the target resource.
-//
-// For example, given the target message:
-//
-// f {
-// b {
-// d: 1
-// x: 2
-// }
-// c: [1]
-// }
-//
-// And an update message:
-//
-// f {
-// b {
-// d: 10
-// }
-// c: [2]
-// }
-//
-// then if the field mask is:
-//
-// paths: ["f.b", "f.c"]
-//
-// then the result will be:
-//
-// f {
-// b {
-// d: 10
-// x: 2
-// }
-// c: [1, 2]
-// }
-//
-// An implementation may provide options to override this default behavior for
-// repeated and message fields.
-//
-// In order to reset a field's value to the default, the field must
-// be in the mask and set to the default value in the provided resource.
-// Hence, in order to reset all fields of a resource, provide a default
-// instance of the resource and set all fields in the mask, or do
-// not provide a mask as described below.
-//
-// If a field mask is not present on update, the operation applies to
-// all fields (as if a field mask of all fields has been specified).
-// Note that in the presence of schema evolution, this may mean that
-// fields the client does not know and has therefore not filled into
-// the request will be reset to their default. If this is unwanted
-// behavior, a specific service may require a client to always specify
-// a field mask, producing an error if not.
-//
-// As with get operations, the location of the resource which
-// describes the updated values in the request message depends on the
-// operation kind. In any case, the effect of the field mask is
-// required to be honored by the API.
-//
-// ## Considerations for HTTP REST
-//
-// The HTTP kind of an update operation which uses a field mask must
-// be set to PATCH instead of PUT in order to satisfy HTTP semantics
-// (PUT must only be used for full updates).
-//
-// # JSON Encoding of Field Masks
-//
-// In JSON, a field mask is encoded as a single string where paths are
- // separated by a comma. Field names in each path are converted
-// to/from lower-camel naming conventions.
-//
-// As an example, consider the following message declarations:
-//
-// message Profile {
-// User user = 1;
-// Photo photo = 2;
-// }
-// message User {
-// string display_name = 1;
-// string address = 2;
-// }
-//
-// In proto a field mask for `Profile` may look as such:
-//
-// mask {
-// paths: "user.display_name"
-// paths: "photo"
-// }
-//
-// In JSON, the same mask is represented as below:
-//
-// {
-// mask: "user.displayName,photo"
-// }
-//
-// # Field Masks and Oneof Fields
-//
-// Field masks treat fields in oneofs just as regular fields. Consider the
-// following message:
-//
-// message SampleMessage {
-// oneof test_oneof {
-// string name = 4;
-// SubMessage sub_message = 9;
-// }
-// }
-//
-// The field mask can be:
-//
-// mask {
-// paths: "name"
-// }
-//
-// Or:
-//
-// mask {
-// paths: "sub_message"
-// }
-//
-// Note that oneof type names ("test_oneof" in this case) cannot be used in
-// paths.
-//
-// ## Field Mask Verification
-//
-// The implementation of any API method which has a FieldMask type field in the
-// request should verify the included field paths, and return an
-// `INVALID_ARGUMENT` error if any path is unmappable.
-type FieldMask struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The set of field mask paths.
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// New constructs a field mask from a list of paths and verifies that
-// each one is valid according to the specified message type.
-func New(m proto.Message, paths ...string) (*FieldMask, error) {
- x := new(FieldMask)
- return x, x.Append(m, paths...)
-}
-
-// Union returns the union of all the paths in the input field masks.
-func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
- var out []string
- out = append(out, mx.GetPaths()...)
- out = append(out, my.GetPaths()...)
- for _, m := range ms {
- out = append(out, m.GetPaths()...)
- }
- return &FieldMask{Paths: normalizePaths(out)}
-}
-
-// Intersect returns the intersection of all the paths in the input field masks.
-func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
- var ss1, ss2 []string // reused buffers for performance
- intersect := func(out, in []string) []string {
- ss1 = normalizePaths(append(ss1[:0], in...))
- ss2 = normalizePaths(append(ss2[:0], out...))
- out = out[:0]
- for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
- switch s1, s2 := ss1[i1], ss2[i2]; {
- case hasPathPrefix(s1, s2):
- out = append(out, s1)
- i1++
- case hasPathPrefix(s2, s1):
- out = append(out, s2)
- i2++
- case lessPath(s1, s2):
- i1++
- case lessPath(s2, s1):
- i2++
- }
- }
- return out
- }
-
- out := Union(mx, my, ms...).GetPaths()
- out = intersect(out, mx.GetPaths())
- out = intersect(out, my.GetPaths())
- for _, m := range ms {
- out = intersect(out, m.GetPaths())
- }
- return &FieldMask{Paths: normalizePaths(out)}
-}
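A usage sketch for New, Union, and Intersect; the paths below are valid fields of google.protobuf.DescriptorProto:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/descriptorpb"
        "google.golang.org/protobuf/types/known/fieldmaskpb"
    )

    func main() {
        m := &descriptorpb.DescriptorProto{}
        a, _ := fieldmaskpb.New(m, "name", "options.deprecated")
        b, _ := fieldmaskpb.New(m, "options", "reserved_name")

        u := fieldmaskpb.Union(a, b)
        i := fieldmaskpb.Intersect(a, b)
        fmt.Println(u.GetPaths()) // [name options reserved_name]
        fmt.Println(i.GetPaths()) // [options.deprecated]
    }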
-
-// IsValid reports whether all the paths are syntactically valid and
-// refer to known fields in the specified message type.
-// It reports false for a nil FieldMask.
-func (x *FieldMask) IsValid(m proto.Message) bool {
- paths := x.GetPaths()
- return x != nil && numValidPaths(m, paths) == len(paths)
-}
-
-// Append appends a list of paths to the mask and verifies that each one
-// is valid according to the specified message type.
- // An invalid path is not appended, and it also stops any subsequent paths from being appended.
-func (x *FieldMask) Append(m proto.Message, paths ...string) error {
- numValid := numValidPaths(m, paths)
- x.Paths = append(x.Paths, paths[:numValid]...)
- paths = paths[numValid:]
- if len(paths) > 0 {
- name := m.ProtoReflect().Descriptor().FullName()
- return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
- }
- return nil
-}
-
-func numValidPaths(m proto.Message, paths []string) int {
- md0 := m.ProtoReflect().Descriptor()
- for i, path := range paths {
- md := md0
- if !rangeFields(path, func(field string) bool {
- // Search the field within the message.
- if md == nil {
- return false // not within a message
- }
- fd := md.Fields().ByName(protoreflect.Name(field))
- // The real field name of a group is the message name.
- if fd == nil {
- gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
- if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
- fd = gd
- }
- } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
- fd = nil
- }
- if fd == nil {
- return false // message does not have this field
- }
-
- // Identify the next message to search within.
- md = fd.Message() // may be nil
-
- // Repeated fields are only allowed at the last position.
- if fd.IsList() || fd.IsMap() {
- md = nil
- }
-
- return true
- }) {
- return i
- }
- }
- return len(paths)
-}
-
-// Normalize converts the mask to its canonical form where all paths are sorted
-// and redundant paths are removed.
-func (x *FieldMask) Normalize() {
- x.Paths = normalizePaths(x.Paths)
-}
-
-func normalizePaths(paths []string) []string {
- sort.Slice(paths, func(i, j int) bool {
- return lessPath(paths[i], paths[j])
- })
-
- // Elide any path that is a prefix match on the previous.
- out := paths[:0]
- for _, path := range paths {
- if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
- continue
- }
- out = append(out, path)
- }
- return out
-}
-
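-// For instance (an illustrative sketch), Normalize elides any path that is
-// already covered by a broader one:
-//
-//	m := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name", "user", "photo"}}
-//	m.Normalize()
-//	// m.Paths is now ["photo", "user"]; "user.display_name" is covered by "user".
-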
-// hasPathPrefix is like strings.HasPrefix, but additionally checks that the
-// match is either exact or delimited by a dot.
-func hasPathPrefix(path, prefix string) bool {
- return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
-}
-
-// lessPath is a lexicographical comparison where dot is specially treated
-// as the smallest symbol.
-func lessPath(x, y string) bool {
- for i := 0; i < len(x) && i < len(y); i++ {
- if x[i] != y[i] {
- return (x[i] - '.') < (y[i] - '.')
- }
- }
- return len(x) < len(y)
-}
-
-// rangeFields is like strings.Split(path, "."), but avoids allocations by
-// iterating over each field in place and calling an iterator function.
-func rangeFields(path string, f func(field string) bool) bool {
- for {
- var field string
- if i := strings.IndexByte(path, '.'); i >= 0 {
- field, path = path[:i], path[i:]
- } else {
- field, path = path, ""
- }
-
- if !f(field) {
- return false
- }
-
- if len(path) == 0 {
- return true
- }
- path = strings.TrimPrefix(path, ".")
- }
-}
-
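-// Conceptually, rangeFields("user.display_name", f) calls f("user") and then
-// f("display_name"), stopping early if f returns false. A rough (allocating)
-// equivalent for illustration only:
-//
-//	for _, field := range strings.Split("user.display_name", ".") {
-//		if !f(field) { // f is a func(string) bool
-//			break
-//		}
-//	}
-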
-func (x *FieldMask) Reset() {
- *x = FieldMask{}
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *FieldMask) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FieldMask) ProtoMessage() {}
-
-func (x *FieldMask) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
-func (*FieldMask) Descriptor() ([]byte, []int) {
- return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *FieldMask) GetPaths() []string {
- if x != nil {
- return x.Paths
- }
- return nil
-}
-
-var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
-
-const file_google_protobuf_field_mask_proto_rawDesc = "" +
- "\n" +
- " google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" +
- "\tFieldMask\x12\x14\n" +
- "\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" +
- "\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
-
-var (
- file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
- file_google_protobuf_field_mask_proto_rawDescData []byte
-)
-
-func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
- file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
- file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)))
- })
- return file_google_protobuf_field_mask_proto_rawDescData
-}
-
-var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_field_mask_proto_goTypes = []any{
- (*FieldMask)(nil), // 0: google.protobuf.FieldMask
-}
-var file_google_protobuf_field_mask_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_google_protobuf_field_mask_proto_init() }
-func file_google_protobuf_field_mask_proto_init() {
- if File_google_protobuf_field_mask_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)),
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_protobuf_field_mask_proto_goTypes,
- DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs,
- MessageInfos: file_google_protobuf_field_mask_proto_msgTypes,
- }.Build()
- File_google_protobuf_field_mask_proto = out.File
- file_google_protobuf_field_mask_proto_goTypes = nil
- file_google_protobuf_field_mask_proto_depIdxs = nil
-}
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
deleted file mode 100644
index 30411b728..000000000
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ /dev/null
@@ -1,767 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/struct.proto
-
-// Package structpb contains generated types for google/protobuf/struct.proto.
-//
-// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
-// used to represent arbitrary JSON. The Value message represents a JSON value,
-// the Struct message represents a JSON object, and the ListValue message
-// represents a JSON array. See https://json.org for more information.
-//
-// The Value, Struct, and ListValue types have generated MarshalJSON and
-// UnmarshalJSON methods such that they serialize JSON equivalent to what the
-// messages themselves represent. Use of these types with the
-// "google.golang.org/protobuf/encoding/protojson" package
-// ensures that they will be serialized as their JSON equivalent.
-//
-// # Conversion to and from a Go interface
-//
-// The standard Go "encoding/json" package has functionality to serialize
-// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
-// ListValue.AsSlice methods can convert the protobuf message representation into
-// a form represented by any, map[string]any, and []any.
-// This form can be used with other packages that operate on such data structures
-// and also directly with the standard json package.
-//
-// In order to convert the any, map[string]any, and []any
-// forms back into Value, Struct, and ListValue messages, use the NewStruct,
-// NewList, and NewValue constructor functions.
-//
-// # Example usage
-//
-// Consider the following example JSON object:
-//
-// {
-// "firstName": "John",
-// "lastName": "Smith",
-// "isAlive": true,
-// "age": 27,
-// "address": {
-// "streetAddress": "21 2nd Street",
-// "city": "New York",
-// "state": "NY",
-// "postalCode": "10021-3100"
-// },
-// "phoneNumbers": [
-// {
-// "type": "home",
-// "number": "212 555-1234"
-// },
-// {
-// "type": "office",
-// "number": "646 555-4567"
-// }
-// ],
-// "children": [],
-// "spouse": null
-// }
-//
-// To construct a Value message representing the above JSON object:
-//
-// m, err := structpb.NewValue(map[string]any{
-// "firstName": "John",
-// "lastName": "Smith",
-// "isAlive": true,
-// "age": 27,
-// "address": map[string]any{
-// "streetAddress": "21 2nd Street",
-// "city": "New York",
-// "state": "NY",
-// "postalCode": "10021-3100",
-// },
-// "phoneNumbers": []any{
-// map[string]any{
-// "type": "home",
-// "number": "212 555-1234",
-// },
-// map[string]any{
-// "type": "office",
-// "number": "646 555-4567",
-// },
-// },
-// "children": []any{},
-// "spouse": nil,
-// })
-// if err != nil {
-// ... // handle error
-// }
-// ... // make use of m as a *structpb.Value
-package structpb
-
-import (
- base64 "encoding/base64"
- json "encoding/json"
- protojson "google.golang.org/protobuf/encoding/protojson"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- math "math"
- reflect "reflect"
- sync "sync"
- utf8 "unicode/utf8"
- unsafe "unsafe"
-)
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-// The JSON representation for `NullValue` is JSON `null`.
-type NullValue int32
-
-const (
- // Null value.
- NullValue_NULL_VALUE NullValue = 0
-)
-
-// Enum value maps for NullValue.
-var (
- NullValue_name = map[int32]string{
- 0: "NULL_VALUE",
- }
- NullValue_value = map[string]int32{
- "NULL_VALUE": 0,
- }
-)
-
-func (x NullValue) Enum() *NullValue {
- p := new(NullValue)
- *p = x
- return p
-}
-
-func (x NullValue) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (NullValue) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_struct_proto_enumTypes[0].Descriptor()
-}
-
-func (NullValue) Type() protoreflect.EnumType {
- return &file_google_protobuf_struct_proto_enumTypes[0]
-}
-
-func (x NullValue) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use NullValue.Descriptor instead.
-func (NullValue) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
-}
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JavaScript a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is a JSON object.
-type Struct struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Unordered map of dynamically typed values.
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// NewStruct constructs a Struct from a general-purpose Go map.
-// The map keys must be valid UTF-8.
-// The map values are converted using NewValue.
-func NewStruct(v map[string]any) (*Struct, error) {
- x := &Struct{Fields: make(map[string]*Value, len(v))}
- for k, v := range v {
- if !utf8.ValidString(k) {
- return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k)
- }
- var err error
- x.Fields[k], err = NewValue(v)
- if err != nil {
- return nil, err
- }
- }
- return x, nil
-}
-
-// AsMap converts x to a general-purpose Go map.
-// The map values are converted by calling Value.AsInterface.
-func (x *Struct) AsMap() map[string]any {
- f := x.GetFields()
- vs := make(map[string]any, len(f))
- for k, v := range f {
- vs[k] = v.AsInterface()
- }
- return vs
-}
-
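-// A hedged round-trip sketch: NewStruct and AsMap are roughly inverse, up to
-// the value conversions documented on NewValue (e.g., all numbers come back
-// as float64):
-//
-//	s, err := structpb.NewStruct(map[string]any{"name": "Ada", "age": 36})
-//	if err != nil {
-//		... // handle invalid UTF-8 or an unsupported value type
-//	}
-//	m := s.AsMap() // map[string]any{"name": "Ada", "age": float64(36)}
-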
-func (x *Struct) MarshalJSON() ([]byte, error) {
- return protojson.Marshal(x)
-}
-
-func (x *Struct) UnmarshalJSON(b []byte) error {
- return protojson.Unmarshal(b, x)
-}
-
-func (x *Struct) Reset() {
- *x = Struct{}
- mi := &file_google_protobuf_struct_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Struct) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Struct) ProtoMessage() {}
-
-func (x *Struct) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_struct_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Struct.ProtoReflect.Descriptor instead.
-func (*Struct) Descriptor() ([]byte, []int) {
- return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Struct) GetFields() map[string]*Value {
- if x != nil {
- return x.Fields
- }
- return nil
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of a value is expected to set one of these
-// variants. Absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is a JSON value.
-type Value struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The kind of value.
- //
- // Types that are valid to be assigned to Kind:
- //
- // *Value_NullValue
- // *Value_NumberValue
- // *Value_StringValue
- // *Value_BoolValue
- // *Value_StructValue
- // *Value_ListValue
- Kind isValue_Kind `protobuf_oneof:"kind"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// NewValue constructs a Value from a general-purpose Go interface.
-//
-// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗
-// ║ Go type │ Conversion ║
-// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣
-// ║ nil │ stored as NullValue ║
-// ║ bool │ stored as BoolValue ║
-// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║
-// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║
-// ║ float32, float64 │ stored as NumberValue ║
-// ║ json.Number │ stored as NumberValue ║
-// ║ string │ stored as StringValue; must be valid UTF-8 ║
-// ║ []byte │ stored as StringValue; base64-encoded ║
-// ║ map[string]any │ stored as StructValue ║
-// ║ []any │ stored as ListValue ║
-// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝
-//
-// When converting an int64 or uint64 to a NumberValue, numeric precision loss
-// is possible since they are stored as a float64.
-func NewValue(v any) (*Value, error) {
- switch v := v.(type) {
- case nil:
- return NewNullValue(), nil
- case bool:
- return NewBoolValue(v), nil
- case int:
- return NewNumberValue(float64(v)), nil
- case int8:
- return NewNumberValue(float64(v)), nil
- case int16:
- return NewNumberValue(float64(v)), nil
- case int32:
- return NewNumberValue(float64(v)), nil
- case int64:
- return NewNumberValue(float64(v)), nil
- case uint:
- return NewNumberValue(float64(v)), nil
- case uint8:
- return NewNumberValue(float64(v)), nil
- case uint16:
- return NewNumberValue(float64(v)), nil
- case uint32:
- return NewNumberValue(float64(v)), nil
- case uint64:
- return NewNumberValue(float64(v)), nil
- case float32:
- return NewNumberValue(float64(v)), nil
- case float64:
- return NewNumberValue(float64(v)), nil
- case json.Number:
- n, err := v.Float64()
- if err != nil {
- return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
- }
- return NewNumberValue(n), nil
- case string:
- if !utf8.ValidString(v) {
- return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
- }
- return NewStringValue(v), nil
- case []byte:
- s := base64.StdEncoding.EncodeToString(v)
- return NewStringValue(s), nil
- case map[string]any:
- v2, err := NewStruct(v)
- if err != nil {
- return nil, err
- }
- return NewStructValue(v2), nil
- case []any:
- v2, err := NewList(v)
- if err != nil {
- return nil, err
- }
- return NewListValue(v2), nil
- default:
- return nil, protoimpl.X.NewError("invalid type: %T", v)
- }
-}
-
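-// To illustrate the precision caveat above (a sketch, not normative), int64
-// values beyond 2^53 cannot round-trip exactly through float64:
-//
-//	v, _ := structpb.NewValue(int64(1<<53 + 1))
-//	_ = v.GetNumberValue() // 9007199254740992, not 9007199254740993
-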
-// NewNullValue constructs a new null Value.
-func NewNullValue() *Value {
- return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}
-}
-
-// NewBoolValue constructs a new boolean Value.
-func NewBoolValue(v bool) *Value {
- return &Value{Kind: &Value_BoolValue{BoolValue: v}}
-}
-
-// NewNumberValue constructs a new number Value.
-func NewNumberValue(v float64) *Value {
- return &Value{Kind: &Value_NumberValue{NumberValue: v}}
-}
-
-// NewStringValue constructs a new string Value.
-func NewStringValue(v string) *Value {
- return &Value{Kind: &Value_StringValue{StringValue: v}}
-}
-
-// NewStructValue constructs a new struct Value.
-func NewStructValue(v *Struct) *Value {
- return &Value{Kind: &Value_StructValue{StructValue: v}}
-}
-
-// NewListValue constructs a new list Value.
-func NewListValue(v *ListValue) *Value {
- return &Value{Kind: &Value_ListValue{ListValue: v}}
-}
-
-// AsInterface converts x to a general-purpose Go interface.
-//
-// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce
-// semantically equivalent JSON (assuming no errors occur).
-//
-// Non-finite floating-point values (i.e., "NaN", "Infinity", and "-Infinity")
-// are converted to strings to remain compatible with MarshalJSON.
-func (x *Value) AsInterface() any {
- switch v := x.GetKind().(type) {
- case *Value_NumberValue:
- if v != nil {
- switch {
- case math.IsNaN(v.NumberValue):
- return "NaN"
- case math.IsInf(v.NumberValue, +1):
- return "Infinity"
- case math.IsInf(v.NumberValue, -1):
- return "-Infinity"
- default:
- return v.NumberValue
- }
- }
- case *Value_StringValue:
- if v != nil {
- return v.StringValue
- }
- case *Value_BoolValue:
- if v != nil {
- return v.BoolValue
- }
- case *Value_StructValue:
- if v != nil {
- return v.StructValue.AsMap()
- }
- case *Value_ListValue:
- if v != nil {
- return v.ListValue.AsSlice()
- }
- }
- return nil
-}
-
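-// For example (an illustrative sketch), non-finite numbers come back as
-// strings rather than float64 values:
-//
-//	v := structpb.NewNumberValue(math.NaN())
-//	_ = v.AsInterface() // the string "NaN"
-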
-func (x *Value) MarshalJSON() ([]byte, error) {
- return protojson.Marshal(x)
-}
-
-func (x *Value) UnmarshalJSON(b []byte) error {
- return protojson.Unmarshal(b, x)
-}
-
-func (x *Value) Reset() {
- *x = Value{}
- mi := &file_google_protobuf_struct_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Value) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Value) ProtoMessage() {}
-
-func (x *Value) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_struct_proto_msgTypes[1]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Value.ProtoReflect.Descriptor instead.
-func (*Value) Descriptor() ([]byte, []int) {
- return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Value) GetKind() isValue_Kind {
- if x != nil {
- return x.Kind
- }
- return nil
-}
-
-func (x *Value) GetNullValue() NullValue {
- if x != nil {
- if x, ok := x.Kind.(*Value_NullValue); ok {
- return x.NullValue
- }
- }
- return NullValue_NULL_VALUE
-}
-
-func (x *Value) GetNumberValue() float64 {
- if x != nil {
- if x, ok := x.Kind.(*Value_NumberValue); ok {
- return x.NumberValue
- }
- }
- return 0
-}
-
-func (x *Value) GetStringValue() string {
- if x != nil {
- if x, ok := x.Kind.(*Value_StringValue); ok {
- return x.StringValue
- }
- }
- return ""
-}
-
-func (x *Value) GetBoolValue() bool {
- if x != nil {
- if x, ok := x.Kind.(*Value_BoolValue); ok {
- return x.BoolValue
- }
- }
- return false
-}
-
-func (x *Value) GetStructValue() *Struct {
- if x != nil {
- if x, ok := x.Kind.(*Value_StructValue); ok {
- return x.StructValue
- }
- }
- return nil
-}
-
-func (x *Value) GetListValue() *ListValue {
- if x != nil {
- if x, ok := x.Kind.(*Value_ListValue); ok {
- return x.ListValue
- }
- }
- return nil
-}
-
-type isValue_Kind interface {
- isValue_Kind()
-}
-
-type Value_NullValue struct {
- // Represents a null value.
- NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
-}
-
-type Value_NumberValue struct {
- // Represents a double value.
- NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
-}
-
-type Value_StringValue struct {
- // Represents a string value.
- StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
-}
-
-type Value_BoolValue struct {
- // Represents a boolean value.
- BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
-}
-
-type Value_StructValue struct {
- // Represents a structured value.
- StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
-}
-
-type Value_ListValue struct {
- // Represents a repeated `Value`.
- ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
-}
-
-func (*Value_NullValue) isValue_Kind() {}
-
-func (*Value_NumberValue) isValue_Kind() {}
-
-func (*Value_StringValue) isValue_Kind() {}
-
-func (*Value_BoolValue) isValue_Kind() {}
-
-func (*Value_StructValue) isValue_Kind() {}
-
-func (*Value_ListValue) isValue_Kind() {}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is a JSON array.
-type ListValue struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Repeated field of dynamically typed values.
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// NewList constructs a ListValue from a general-purpose Go slice.
-// The slice elements are converted using NewValue.
-func NewList(v []any) (*ListValue, error) {
- x := &ListValue{Values: make([]*Value, len(v))}
- for i, v := range v {
- var err error
- x.Values[i], err = NewValue(v)
- if err != nil {
- return nil, err
- }
- }
- return x, nil
-}
-
-// AsSlice converts x to a general-purpose Go slice.
-// The slice elements are converted by calling Value.AsInterface.
-func (x *ListValue) AsSlice() []any {
- vals := x.GetValues()
- vs := make([]any, len(vals))
- for i, v := range vals {
- vs[i] = v.AsInterface()
- }
- return vs
-}
-
-func (x *ListValue) MarshalJSON() ([]byte, error) {
- return protojson.Marshal(x)
-}
-
-func (x *ListValue) UnmarshalJSON(b []byte) error {
- return protojson.Unmarshal(b, x)
-}
-
-func (x *ListValue) Reset() {
- *x = ListValue{}
- mi := &file_google_protobuf_struct_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *ListValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListValue) ProtoMessage() {}
-
-func (x *ListValue) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_struct_proto_msgTypes[2]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
-func (*ListValue) Descriptor() ([]byte, []int) {
- return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ListValue) GetValues() []*Value {
- if x != nil {
- return x.Values
- }
- return nil
-}
-
-var File_google_protobuf_struct_proto protoreflect.FileDescriptor
-
-const file_google_protobuf_struct_proto_rawDesc = "" +
- "\n" +
- "\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" +
- "\x06Struct\x12;\n" +
- "\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" +
- "\vFieldsEntry\x12\x10\n" +
- "\x03key\x18\x01 \x01(\tR\x03key\x12,\n" +
- "\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" +
- "\x05Value\x12;\n" +
- "\n" +
- "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" +
- "\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" +
- "\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" +
- "\n" +
- "bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" +
- "\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" +
- "\n" +
- "list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" +
- "\x04kind\";\n" +
- "\tListValue\x12.\n" +
- "\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" +
- "\tNullValue\x12\x0e\n" +
- "\n" +
- "NULL_VALUE\x10\x00B\x7f\n" +
- "\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
-
-var (
- file_google_protobuf_struct_proto_rawDescOnce sync.Once
- file_google_protobuf_struct_proto_rawDescData []byte
-)
-
-func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
- file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
- file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)))
- })
- return file_google_protobuf_struct_proto_rawDescData
-}
-
-var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_google_protobuf_struct_proto_goTypes = []any{
- (NullValue)(0), // 0: google.protobuf.NullValue
- (*Struct)(nil), // 1: google.protobuf.Struct
- (*Value)(nil), // 2: google.protobuf.Value
- (*ListValue)(nil), // 3: google.protobuf.ListValue
- nil, // 4: google.protobuf.Struct.FieldsEntry
-}
-var file_google_protobuf_struct_proto_depIdxs = []int32{
- 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry
- 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue
- 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct
- 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue
- 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value
- 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value
- 6, // [6:6] is the sub-list for method output_type
- 6, // [6:6] is the sub-list for method input_type
- 6, // [6:6] is the sub-list for extension type_name
- 6, // [6:6] is the sub-list for extension extendee
- 0, // [0:6] is the sub-list for field type_name
-}
-
-func init() { file_google_protobuf_struct_proto_init() }
-func file_google_protobuf_struct_proto_init() {
- if File_google_protobuf_struct_proto != nil {
- return
- }
- file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
- (*Value_NullValue)(nil),
- (*Value_NumberValue)(nil),
- (*Value_StringValue)(nil),
- (*Value_BoolValue)(nil),
- (*Value_StructValue)(nil),
- (*Value_ListValue)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)),
- NumEnums: 1,
- NumMessages: 4,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_protobuf_struct_proto_goTypes,
- DependencyIndexes: file_google_protobuf_struct_proto_depIdxs,
- EnumInfos: file_google_protobuf_struct_proto_enumTypes,
- MessageInfos: file_google_protobuf_struct_proto_msgTypes,
- }.Build()
- File_google_protobuf_struct_proto = out.File
- file_google_protobuf_struct_proto_goTypes = nil
- file_google_protobuf_struct_proto_depIdxs = nil
-}
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
deleted file mode 100644
index 06d584c14..000000000
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/timestamp.proto
-
-// Package timestamppb contains generated types for google/protobuf/timestamp.proto.
-//
-// The Timestamp message represents a timestamp,
-// an instant in time since the Unix epoch (January 1st, 1970).
-//
-// # Conversion to a Go Time
-//
-// The AsTime method can be used to convert a Timestamp message to a
-// standard Go time.Time value in UTC:
-//
-// t := ts.AsTime()
-// ... // make use of t as a time.Time
-//
-// Converting to a time.Time is a common operation, as it makes the extensive
-// set of time-based operations provided by the time package available.
-// See https://golang.org/pkg/time for more information.
-//
-// The AsTime method performs the conversion on a best-effort basis. Timestamps
-// with denormal values (e.g., nanoseconds outside the range 0 to 999,999,999)
-// are normalized during the conversion to a time.Time. To manually check for
-// invalid Timestamps per the documented limitations in timestamp.proto,
-// additionally call the CheckValid method:
-//
-// if err := ts.CheckValid(); err != nil {
-// ... // handle error
-// }
-//
-// # Conversion from a Go Time
-//
-// The timestamppb.New function can be used to construct a Timestamp message
-// from a standard Go time.Time value:
-//
-// ts := timestamppb.New(t)
-// ... // make use of ts as a *timestamppb.Timestamp
-//
-// In order to construct a Timestamp representing the current time, use Now:
-//
-// ts := timestamppb.Now()
-// ... // make use of ts as a *timestamppb.Timestamp
-package timestamppb
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
- time "time"
- unsafe "unsafe"
-)
-
-// A Timestamp represents a point in time independent of any time zone or local
-// calendar, encoded as a count of seconds and fractions of seconds at
-// nanosecond resolution. The count is relative to an epoch at UTC midnight on
-// January 1, 1970, in the proleptic Gregorian calendar which extends the
-// Gregorian calendar backwards to year one.
-//
-// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
-// second table is needed for interpretation, using a [24-hour linear
-// smear](https://developers.google.com/time/smear).
-//
-// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
-// restricting to that range, we ensure that we can convert to and from [RFC
-// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-// Example 5: Compute Timestamp from Java `Instant.now()`.
-//
-// Instant now = Instant.now();
-//
-// Timestamp timestamp =
-// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
-// .setNanos(now.getNano()).build();
-//
-// Example 6: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard
-// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using
-// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
-// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
-// the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
-// ) to obtain a formatter capable of generating timestamps in this format.
-type Timestamp struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Now constructs a new Timestamp from the current time.
-func Now() *Timestamp {
- return New(time.Now())
-}
-
-// New constructs a new Timestamp from the provided time.Time.
-func New(t time.Time) *Timestamp {
- return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}
-}
-
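-// A hedged round-trip sketch: New followed by AsTime recovers the original
-// instant (in UTC), and CheckValid guards the documented range:
-//
-//	t := time.Date(2017, time.January, 15, 1, 30, 15, 10000000, time.UTC)
-//	ts := timestamppb.New(t)
-//	if err := ts.CheckValid(); err != nil {
-//		... // ts is outside 0001-01-01 to 9999-12-31
-//	}
-//	_ = ts.AsTime().Equal(t) // true
-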
-// AsTime converts x to a time.Time.
-func (x *Timestamp) AsTime() time.Time {
- return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()
-}
-
-// IsValid reports whether the timestamp is valid.
-// It is equivalent to CheckValid == nil.
-func (x *Timestamp) IsValid() bool {
- return x.check() == 0
-}
-
-// CheckValid returns an error if the timestamp is invalid.
-// In particular, it checks whether the value represents a date that is
-// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.
-// An error is reported for a nil Timestamp.
-func (x *Timestamp) CheckValid() error {
- switch x.check() {
- case invalidNil:
- return protoimpl.X.NewError("invalid nil Timestamp")
- case invalidUnderflow:
- return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x)
- case invalidOverflow:
- return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x)
- case invalidNanos:
- return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x)
- default:
- return nil
- }
-}
-
-const (
- _ = iota
- invalidNil
- invalidUnderflow
- invalidOverflow
- invalidNanos
-)
-
-func (x *Timestamp) check() uint {
- const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive
- const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive
- secs := x.GetSeconds()
- nanos := x.GetNanos()
- switch {
- case x == nil:
- return invalidNil
- case secs < minTimestamp:
- return invalidUnderflow
- case secs > maxTimestamp:
- return invalidOverflow
- case nanos < 0 || nanos >= 1e9:
- return invalidNanos
- default:
- return 0
- }
-}
-
-func (x *Timestamp) Reset() {
- *x = Timestamp{}
- mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Timestamp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Timestamp) ProtoMessage() {}
-
-func (x *Timestamp) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
-func (*Timestamp) Descriptor() ([]byte, []int) {
- return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Timestamp) GetSeconds() int64 {
- if x != nil {
- return x.Seconds
- }
- return 0
-}
-
-func (x *Timestamp) GetNanos() int32 {
- if x != nil {
- return x.Nanos
- }
- return 0
-}
-
-var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
-
-const file_google_protobuf_timestamp_proto_rawDesc = "" +
- "\n" +
- "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
- "\tTimestamp\x12\x18\n" +
- "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
- "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
- "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
-
-var (
- file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
- file_google_protobuf_timestamp_proto_rawDescData []byte
-)
-
-func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
- file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
- file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)))
- })
- return file_google_protobuf_timestamp_proto_rawDescData
-}
-
-var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_timestamp_proto_goTypes = []any{
- (*Timestamp)(nil), // 0: google.protobuf.Timestamp
-}
-var file_google_protobuf_timestamp_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_google_protobuf_timestamp_proto_init() }
-func file_google_protobuf_timestamp_proto_init() {
- if File_google_protobuf_timestamp_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)),
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_protobuf_timestamp_proto_goTypes,
- DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs,
- MessageInfos: file_google_protobuf_timestamp_proto_msgTypes,
- }.Build()
- File_google_protobuf_timestamp_proto = out.File
- file_google_protobuf_timestamp_proto_goTypes = nil
- file_google_protobuf_timestamp_proto_depIdxs = nil
-}
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
deleted file mode 100644
index b7c2d0607..000000000
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Wrappers for primitive (non-message) types. These types were needed
-// for legacy reasons and are not recommended for use in new APIs.
-//
-// Historically these wrappers were useful for adding field presence to proto3
-// primitive fields, but proto3 syntax has been updated to support the
-// `optional` keyword.
-// Using that keyword is now the strongly preferred way to add presence to
-// proto3 primitive fields.
-//
-// A secondary use case was to embed primitives in the `google.protobuf.Any`
-// type: it is now recommended that you embed your value in your own wrapper
-// message which can be specifically documented.
-//
-// These wrappers have no meaningful use within repeated fields as they lack
-// the ability to detect presence on individual elements.
-// These wrappers have no meaningful use within a map or a oneof since
-// individual entries of a map or fields of a oneof can already detect presence.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/wrappers.proto
-
-package wrapperspb
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
- unsafe "unsafe"
-)
-
-// Wrapper message for `double`.
-//
-// The JSON representation for `DoubleValue` is a JSON number.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type DoubleValue struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The double value.
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Double stores v in a new DoubleValue and returns a pointer to it.
-func Double(v float64) *DoubleValue {
- return &DoubleValue{Value: v}
-}
-
-func (x *DoubleValue) Reset() {
- *x = DoubleValue{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *DoubleValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DoubleValue) ProtoMessage() {}
-
-func (x *DoubleValue) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead.
-func (*DoubleValue) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *DoubleValue) GetValue() float64 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-
-// Wrapper message for `float`.
-//
-// The JSON representation for `FloatValue` is a JSON number.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type FloatValue struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The float value.
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Float stores v in a new FloatValue and returns a pointer to it.
-func Float(v float32) *FloatValue {
- return &FloatValue{Value: v}
-}
-
-func (x *FloatValue) Reset() {
- *x = FloatValue{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *FloatValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FloatValue) ProtoMessage() {}
-
-func (x *FloatValue) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead.
-func (*FloatValue) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *FloatValue) GetValue() float32 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-
-// Wrapper message for `int64`.
-//
-// The JSON representation for `Int64Value` is a JSON string.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type Int64Value struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The int64 value.
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Int64 stores v in a new Int64Value and returns a pointer to it.
-func Int64(v int64) *Int64Value {
- return &Int64Value{Value: v}
-}
-
-func (x *Int64Value) Reset() {
- *x = Int64Value{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Int64Value) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Int64Value) ProtoMessage() {}
-
-func (x *Int64Value) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead.
-func (*Int64Value) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Int64Value) GetValue() int64 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-
-// Wrapper message for `uint64`.
-//
-// The JSON representation for `UInt64Value` is a JSON string.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type UInt64Value struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The uint64 value.
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// UInt64 stores v in a new UInt64Value and returns a pointer to it.
-func UInt64(v uint64) *UInt64Value {
- return &UInt64Value{Value: v}
-}
-
-func (x *UInt64Value) Reset() {
- *x = UInt64Value{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *UInt64Value) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UInt64Value) ProtoMessage() {}
-
-func (x *UInt64Value) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead.
-func (*UInt64Value) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *UInt64Value) GetValue() uint64 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-
-// Wrapper message for `int32`.
-//
-// The JSON representation for `Int32Value` is a JSON number.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type Int32Value struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The int32 value.
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Int32 stores v in a new Int32Value and returns a pointer to it.
-func Int32(v int32) *Int32Value {
- return &Int32Value{Value: v}
-}
-
-func (x *Int32Value) Reset() {
- *x = Int32Value{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Int32Value) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Int32Value) ProtoMessage() {}
-
-func (x *Int32Value) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead.
-func (*Int32Value) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *Int32Value) GetValue() int32 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-
-// Wrapper message for `uint32`.
-//
-// The JSON representation for `UInt32Value` is a JSON number.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type UInt32Value struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The uint32 value.
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// UInt32 stores v in a new UInt32Value and returns a pointer to it.
-func UInt32(v uint32) *UInt32Value {
- return &UInt32Value{Value: v}
-}
-
-func (x *UInt32Value) Reset() {
- *x = UInt32Value{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *UInt32Value) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UInt32Value) ProtoMessage() {}
-
-func (x *UInt32Value) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead.
-func (*UInt32Value) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *UInt32Value) GetValue() uint32 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-
-// Wrapper message for `bool`.
-//
-// The JSON representation for `BoolValue` is JSON `true` and `false`.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type BoolValue struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The bool value.
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Bool stores v in a new BoolValue and returns a pointer to it.
-func Bool(v bool) *BoolValue {
- return &BoolValue{Value: v}
-}
-
-func (x *BoolValue) Reset() {
- *x = BoolValue{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *BoolValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BoolValue) ProtoMessage() {}
-
-func (x *BoolValue) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead.
-func (*BoolValue) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *BoolValue) GetValue() bool {
- if x != nil {
- return x.Value
- }
- return false
-}
-
-// Wrapper message for `string`.
-//
-// The JSON representation for `StringValue` is JSON string.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type StringValue struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The string value.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// String stores v in a new StringValue and returns a pointer to it.
-func String(v string) *StringValue {
- return &StringValue{Value: v}
-}
-
-func (x *StringValue) Reset() {
- *x = StringValue{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StringValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StringValue) ProtoMessage() {}
-
-func (x *StringValue) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StringValue.ProtoReflect.Descriptor instead.
-func (*StringValue) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *StringValue) GetValue() string {
- if x != nil {
- return x.Value
- }
- return ""
-}
-
-// Wrapper message for `bytes`.
-//
-// The JSON representation for `BytesValue` is JSON string.
-//
-// Not recommended for use in new APIs, but still useful for legacy APIs and
-// has no plan to be removed.
-type BytesValue struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // The bytes value.
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Bytes stores v in a new BytesValue and returns a pointer to it.
-func Bytes(v []byte) *BytesValue {
- return &BytesValue{Value: v}
-}
-
-func (x *BytesValue) Reset() {
- *x = BytesValue{}
- mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *BytesValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BytesValue) ProtoMessage() {}
-
-func (x *BytesValue) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead.
-func (*BytesValue) Descriptor() ([]byte, []int) {
- return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *BytesValue) GetValue() []byte {
- if x != nil {
- return x.Value
- }
- return nil
-}
-
-var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
-
-const file_google_protobuf_wrappers_proto_rawDesc = "" +
- "\n" +
- "\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
- "\vDoubleValue\x12\x14\n" +
- "\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
- "\n" +
- "FloatValue\x12\x14\n" +
- "\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
- "\n" +
- "Int64Value\x12\x14\n" +
- "\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
- "\vUInt64Value\x12\x14\n" +
- "\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
- "\n" +
- "Int32Value\x12\x14\n" +
- "\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
- "\vUInt32Value\x12\x14\n" +
- "\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
- "\tBoolValue\x12\x14\n" +
- "\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
- "\vStringValue\x12\x14\n" +
- "\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
- "\n" +
- "BytesValue\x12\x14\n" +
- "\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
- "\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
-
-var (
- file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
- file_google_protobuf_wrappers_proto_rawDescData []byte
-)
-
-func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
- file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
- file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)))
- })
- return file_google_protobuf_wrappers_proto_rawDescData
-}
-
-var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_google_protobuf_wrappers_proto_goTypes = []any{
- (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
- (*FloatValue)(nil), // 1: google.protobuf.FloatValue
- (*Int64Value)(nil), // 2: google.protobuf.Int64Value
- (*UInt64Value)(nil), // 3: google.protobuf.UInt64Value
- (*Int32Value)(nil), // 4: google.protobuf.Int32Value
- (*UInt32Value)(nil), // 5: google.protobuf.UInt32Value
- (*BoolValue)(nil), // 6: google.protobuf.BoolValue
- (*StringValue)(nil), // 7: google.protobuf.StringValue
- (*BytesValue)(nil), // 8: google.protobuf.BytesValue
-}
-var file_google_protobuf_wrappers_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_google_protobuf_wrappers_proto_init() }
-func file_google_protobuf_wrappers_proto_init() {
- if File_google_protobuf_wrappers_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)),
- NumEnums: 0,
- NumMessages: 9,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_protobuf_wrappers_proto_goTypes,
- DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs,
- MessageInfos: file_google_protobuf_wrappers_proto_msgTypes,
- }.Build()
- File_google_protobuf_wrappers_proto = out.File
- file_google_protobuf_wrappers_proto_goTypes = nil
- file_google_protobuf_wrappers_proto_depIdxs = nil
-}
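The file removed above is the pre-generated Go binding for google/protobuf/wrappers.proto; callers are expected to use the canonical google.golang.org/protobuf/types/known/wrapperspb package instead. As a hedged illustration (not taken from this diff — the actual call sites live elsewhere in the vendored tree), this is how the helper constructors and nil-safe getters deleted above are typically used:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// wrapperspb.Int32 stores v in a new Int32Value, just like the deleted
	// generated constructor above; the pointer distinguishes "unset" from 0.
	n := wrapperspb.Int32(42)
	var unset *wrapperspb.Int32Value

	// GetValue is nil-safe, mirroring the `if x != nil { return x.Value }`
	// accessor bodies above: a nil wrapper yields the zero value.
	fmt.Println(n.GetValue(), unset.GetValue()) // 42 0
}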
diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go
index ca0086188..ec0ebb9c4 100644
--- a/vendor/k8s.io/api/admissionregistration/v1/doc.go
+++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go
@@ -24,4 +24,4 @@ limitations under the License.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
// new dynamic admission controller configuration.
-package v1 // import "k8s.io/api/admissionregistration/v1"
+package v1
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
index 98066211d..344af9ae0 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=admissionregistration.k8s.io
// Package v1alpha1 is the v1alpha1 version of the API.
-package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1"
+package v1alpha1
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
index 88344ce87..d23f21cc8 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
@@ -272,9 +272,9 @@ message MatchResources {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
- // ObjectSelector decides whether to run the validation based on if the
+ // ObjectSelector decides whether to run the policy based on whether the
// object has matching labels. objectSelector is evaluated against both
- // the oldObject and newObject that would be sent to the cel validation, and
+ // the oldObject and newObject that would be sent to the policy's expression (CEL), and
// is considered to match if either object matches the selector. A null
// object (oldObject in the case of create, or newObject in the case of
// delete) or an object that cannot have labels (like a
@@ -286,13 +286,13 @@ message MatchResources {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
- // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+ // ResourceRules describes what operations on what resources/subresources the admission policy matches.
// The policy cares about an operation if it matches _any_ Rule.
// +listType=atomic
// +optional
repeated NamedRuleWithOperations resourceRules = 3;
- // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+ // ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
// +listType=atomic
// +optional
@@ -304,12 +304,13 @@ message MatchResources {
// - Exact: match a request only if it exactly matches a specified rule.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
- // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+ // the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.
//
// - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
- // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+ // the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1
+ // API groups. The API server translates the request to a matched resource API if necessary.
//
// Defaults to "Equivalent"
// +optional
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
index ee50fbe2d..f183498a5 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
@@ -56,9 +56,9 @@ const (
type FailurePolicyType string
const (
- // Ignore means that an error calling the webhook is ignored.
+ // Ignore means that an error calling the admission webhook or admission policy is ignored.
Ignore FailurePolicyType = "Ignore"
- // Fail means that an error calling the webhook causes the admission to fail.
+ // Fail means that an error calling the admission webhook or admission policy causes resource admission to fail.
Fail FailurePolicyType = "Fail"
)
@@ -67,9 +67,11 @@ const (
type MatchPolicyType string
const (
- // Exact means requests should only be sent to the webhook if they exactly match a given rule.
+ // Exact means requests should only be sent to the admission webhook or admission policy if they exactly match a given rule.
Exact MatchPolicyType = "Exact"
- // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version.
+ // Equivalent means requests should be sent to the admission webhook or admission policy if they modify a resource listed
+ // in rules via an equivalent API group or version. For example, `autoscaling/v1` and `autoscaling/v2`
+ // HorizontalPodAutoscalers are equivalent: the same set of resources appear via both APIs.
Equivalent MatchPolicyType = "Equivalent"
)
@@ -577,9 +579,9 @@ type MatchResources struct {
// Default to the empty LabelSelector, which matches everything.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"`
- // ObjectSelector decides whether to run the validation based on if the
+ // ObjectSelector decides whether to run the policy based on whether the
// object has matching labels. objectSelector is evaluated against both
- // the oldObject and newObject that would be sent to the cel validation, and
+ // the oldObject and newObject that would be sent to the policy's expression (CEL), and
// is considered to match if either object matches the selector. A null
// object (oldObject in the case of create, or newObject in the case of
// delete) or an object that cannot have labels (like a
@@ -590,12 +592,12 @@ type MatchResources struct {
// Default to the empty LabelSelector, which matches everything.
// +optional
ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"`
- // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+ // ResourceRules describes what operations on what resources/subresources the admission policy matches.
// The policy cares about an operation if it matches _any_ Rule.
// +listType=atomic
// +optional
ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"`
- // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+ // ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
// +listType=atomic
// +optional
@@ -606,12 +608,13 @@ type MatchResources struct {
// - Exact: match a request only if it exactly matches a specified rule.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
- // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+ // the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.
//
// - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
- // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+ // the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1
+ // API groups. The API server translates the request to a matched resource API if necessary.
//
// Defaults to "Equivalent"
// +optional
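The Exact/Equivalent doc rewrite above is easier to see in code. A minimal sketch, assuming the v1alpha1 MatchResources shape shown in this file; the rule values are illustrative, not taken from the diff:

package main

import (
	admv1 "k8s.io/api/admissionregistration/v1"
	admv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	// Equivalent (the default) also matches requests made through equivalent
	// API groups/versions; the API server converts them before evaluation.
	policy := admv1alpha1.Equivalent
	match := admv1alpha1.MatchResources{
		MatchPolicy: &policy,
		ResourceRules: []admv1alpha1.NamedRuleWithOperations{{
			RuleWithOperations: admv1.RuleWithOperations{
				Operations: []admv1.OperationType{admv1.Create, admv1.Update},
				Rule: admv1.Rule{
					APIGroups:   []string{"apps"},
					APIVersions: []string{"v1"},
					Resources:   []string{"deployments"},
				},
			},
		}},
	}
	// With MatchPolicy=Equivalent, an update made via apps/v1beta1 still
	// matches this rule; with Exact it would not.
	_ = match
}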
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
index 32222a81b..116e56e06 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
@@ -68,10 +68,10 @@ func (JSONPatch) SwaggerDoc() map[string]string {
var map_MatchResources = map[string]string{
"": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
"namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
- "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
- "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.",
- "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
- "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"",
+ "objectSelector": "ObjectSelector decides whether to run the policy based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the policy's expression (CEL), and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
+ "resourceRules": "ResourceRules describes what operations on what resources/subresources the admission policy matches. The policy cares about an operation if it matches _any_ Rule.",
+ "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
+ "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1 API groups. The API server translates the request to a matched resource API if necessary.\n\nDefaults to \"Equivalent\"",
}
func (MatchResources) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
index 0095cb257..40d831573 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
@@ -24,4 +24,4 @@ limitations under the License.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
// new dynamic admission controller configuration.
-package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/apidiscovery/v2/doc.go b/vendor/k8s.io/api/apidiscovery/v2/doc.go
index 4f3ad5f13..f46d33e94 100644
--- a/vendor/k8s.io/api/apidiscovery/v2/doc.go
+++ b/vendor/k8s.io/api/apidiscovery/v2/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=apidiscovery.k8s.io
-package v2 // import "k8s.io/api/apidiscovery/v2"
+package v2
diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
index e85da226e..d4fceab68 100644
--- a/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
+++ b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=apidiscovery.k8s.io
-package v2beta1 // import "k8s.io/api/apidiscovery/v2beta1"
+package v2beta1
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
index a4da95d44..867d74165 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
// Package v1alpha1 contains the v1alpha1 version of the API used by the
// apiservers themselves.
-package v1alpha1 // import "k8s.io/api/apiserverinternal/v1alpha1"
+package v1alpha1
diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go
index d189e860f..51fe12c53 100644
--- a/vendor/k8s.io/api/apps/v1/doc.go
+++ b/vendor/k8s.io/api/apps/v1/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1 // import "k8s.io/api/apps/v1"
+package v1
diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go
index ea62a099f..eacc25931 100644
--- a/vendor/k8s.io/api/apps/v1/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1/generated.pb.go
@@ -928,145 +928,147 @@ func init() {
}
var fileDescriptor_5b781835628d5338 = []byte{
- // 2194 bytes of a gzipped FileDescriptorProto
+ // 2225 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
- 0x15, 0xd7, 0xf2, 0x43, 0xa2, 0x86, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x1b, 0xd2, 0xdd, 0xb8,
- 0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0xe0, 0x14, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab,
- 0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x68, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x0b,
- 0xbd, 0x14, 0x05, 0x7a, 0xeb, 0xa1, 0x7f, 0x43, 0xff, 0x81, 0xa2, 0x28, 0x9a, 0x5b, 0x10, 0x04,
- 0xbd, 0xf8, 0x52, 0x20, 0xe8, 0xa5, 0x39, 0x11, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x2f,
- 0x2d, 0x66, 0x76, 0xf6, 0x7b, 0x56, 0xa4, 0xe4, 0x58, 0x69, 0x82, 0xdc, 0xb8, 0x33, 0xbf, 0xf7,
- 0xdb, 0x37, 0x33, 0xef, 0xcd, 0xfb, 0xcd, 0x2c, 0x81, 0x7a, 0xff, 0x55, 0xaf, 0xa9, 0xdb, 0x2d,
- 0xec, 0xe8, 0x2d, 0xec, 0x38, 0x5e, 0xeb, 0xe0, 0x7a, 0xab, 0x4f, 0x2c, 0xe2, 0x62, 0x4a, 0x7a,
- 0x4d, 0xc7, 0xb5, 0xa9, 0x0d, 0xa1, 0x8f, 0x69, 0x62, 0x47, 0x6f, 0x32, 0x4c, 0xf3, 0xe0, 0xfa,
- 0xf9, 0x6b, 0x7d, 0x9d, 0xee, 0x0f, 0xf6, 0x9a, 0x9a, 0x6d, 0xb6, 0xfa, 0x76, 0xdf, 0x6e, 0x71,
- 0xe8, 0xde, 0xe0, 0x1e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x4f, 0x71, 0x3e, 0xfe, 0x1a, 0xcd, 0x76,
- 0x89, 0xe4, 0x35, 0xe7, 0x6f, 0x46, 0x18, 0x13, 0x6b, 0xfb, 0xba, 0x45, 0xdc, 0xc3, 0x96, 0x73,
- 0xbf, 0xcf, 0x1a, 0xbc, 0x96, 0x49, 0x28, 0x96, 0x59, 0xb5, 0xf2, 0xac, 0xdc, 0x81, 0x45, 0x75,
- 0x93, 0x64, 0x0c, 0x5e, 0x19, 0x67, 0xe0, 0x69, 0xfb, 0xc4, 0xc4, 0x19, 0xbb, 0x97, 0xf2, 0xec,
- 0x06, 0x54, 0x37, 0x5a, 0xba, 0x45, 0x3d, 0xea, 0xa6, 0x8d, 0xd4, 0xff, 0x28, 0x00, 0x76, 0x6c,
- 0x8b, 0xba, 0xb6, 0x61, 0x10, 0x17, 0x91, 0x03, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x1c, 0x54, 0xd8,
- 0x78, 0x7a, 0x98, 0xe2, 0x9a, 0x72, 0x51, 0x59, 0xad, 0xde, 0xf8, 0x5e, 0x33, 0x9a, 0xe4, 0x90,
- 0xbe, 0xe9, 0xdc, 0xef, 0xb3, 0x06, 0xaf, 0xc9, 0xd0, 0xcd, 0x83, 0xeb, 0xcd, 0xed, 0xbd, 0xf7,
- 0x89, 0x46, 0x37, 0x09, 0xc5, 0x6d, 0xf8, 0x70, 0xd8, 0x98, 0x1a, 0x0d, 0x1b, 0x20, 0x6a, 0x43,
- 0x21, 0x2b, 0xdc, 0x06, 0x25, 0xce, 0x5e, 0xe0, 0xec, 0xd7, 0x72, 0xd9, 0xc5, 0xa0, 0x9b, 0x08,
- 0x7f, 0xf0, 0xe6, 0x03, 0x4a, 0x2c, 0xe6, 0x5e, 0xfb, 0x8c, 0xa0, 0x2e, 0xad, 0x63, 0x8a, 0x11,
- 0x27, 0x82, 0x2f, 0x82, 0x8a, 0x2b, 0xdc, 0xaf, 0x15, 0x2f, 0x2a, 0xab, 0xc5, 0xf6, 0x59, 0x81,
- 0xaa, 0x04, 0xc3, 0x42, 0x21, 0x42, 0xfd, 0xb3, 0x02, 0x96, 0xb3, 0xe3, 0xde, 0xd0, 0x3d, 0x0a,
- 0xdf, 0xcb, 0x8c, 0xbd, 0x39, 0xd9, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xf8, 0xe2, 0xa0, 0x25, 0x36,
- 0xee, 0x77, 0x40, 0x59, 0xa7, 0xc4, 0xf4, 0x6a, 0x85, 0x8b, 0xc5, 0xd5, 0xea, 0x8d, 0xcb, 0xcd,
- 0x6c, 0xec, 0x36, 0xb3, 0x8e, 0xb5, 0xe7, 0x04, 0x65, 0xf9, 0x6d, 0x66, 0x8c, 0x7c, 0x0e, 0xf5,
- 0xbf, 0x0a, 0x98, 0x5d, 0xc7, 0xc4, 0xb4, 0xad, 0x2e, 0xa1, 0xa7, 0xb0, 0x68, 0x1d, 0x50, 0xf2,
- 0x1c, 0xa2, 0x89, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xd0, 0x9d, 0xae, 0x43, 0xb4, 0x68, 0xa1, 0xd8,
- 0x13, 0xe2, 0xc6, 0xf0, 0x1d, 0x30, 0xed, 0x51, 0x4c, 0x07, 0x1e, 0x5f, 0xa6, 0xea, 0x8d, 0xe7,
- 0x8e, 0xa6, 0xe1, 0xd0, 0xf6, 0xbc, 0x20, 0x9a, 0xf6, 0x9f, 0x91, 0xa0, 0x50, 0xff, 0x51, 0x00,
- 0x30, 0xc4, 0x76, 0x6c, 0xab, 0xa7, 0x53, 0x16, 0xbf, 0xb7, 0x40, 0x89, 0x1e, 0x3a, 0x84, 0x4f,
- 0xc3, 0x6c, 0xfb, 0x72, 0xe0, 0xc5, 0xed, 0x43, 0x87, 0x3c, 0x1e, 0x36, 0x96, 0xb3, 0x16, 0xac,
- 0x07, 0x71, 0x1b, 0xb8, 0x11, 0xfa, 0x57, 0xe0, 0xd6, 0x37, 0x93, 0xaf, 0x7e, 0x3c, 0x6c, 0x48,
- 0x36, 0x8b, 0x66, 0xc8, 0x94, 0x74, 0x10, 0x1e, 0x00, 0x68, 0x60, 0x8f, 0xde, 0x76, 0xb1, 0xe5,
- 0xf9, 0x6f, 0xd2, 0x4d, 0x22, 0x46, 0xfe, 0xc2, 0x64, 0xcb, 0xc3, 0x2c, 0xda, 0xe7, 0x85, 0x17,
- 0x70, 0x23, 0xc3, 0x86, 0x24, 0x6f, 0x80, 0x97, 0xc1, 0xb4, 0x4b, 0xb0, 0x67, 0x5b, 0xb5, 0x12,
- 0x1f, 0x45, 0x38, 0x81, 0x88, 0xb7, 0x22, 0xd1, 0x0b, 0x9f, 0x07, 0x33, 0x26, 0xf1, 0x3c, 0xdc,
- 0x27, 0xb5, 0x32, 0x07, 0x2e, 0x08, 0xe0, 0xcc, 0xa6, 0xdf, 0x8c, 0x82, 0x7e, 0xf5, 0x0f, 0x0a,
- 0x98, 0x0b, 0x67, 0xee, 0x14, 0x52, 0xa5, 0x9d, 0x4c, 0x95, 0x67, 0x8f, 0x8c, 0x93, 0x9c, 0x0c,
- 0xf9, 0xb8, 0x18, 0xf3, 0x99, 0x05, 0x21, 0xfc, 0x29, 0xa8, 0x78, 0xc4, 0x20, 0x1a, 0xb5, 0x5d,
- 0xe1, 0xf3, 0x4b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xd1, 0x15, 0xa6, 0xed, 0x33, 0xcc, 0xe9, 0xe0,
- 0x09, 0x85, 0x94, 0xf0, 0xc7, 0xa0, 0x42, 0x89, 0xe9, 0x18, 0x98, 0x12, 0x91, 0x26, 0x89, 0xf8,
- 0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xec, 0xde, 0x6d, 0x01, 0xe3, 0x89, 0x12, 0xce, 0x43, 0xd0, 0x8a,
- 0x42, 0x1a, 0x78, 0x1f, 0xcc, 0x0f, 0x9c, 0x1e, 0x43, 0x52, 0xb6, 0x75, 0xf7, 0x0f, 0x45, 0xf8,
- 0x5c, 0x3d, 0x72, 0x42, 0x76, 0x13, 0x26, 0xed, 0x65, 0xf1, 0x82, 0xf9, 0x64, 0x3b, 0x4a, 0x51,
- 0xc3, 0x35, 0xb0, 0x60, 0xea, 0x16, 0x22, 0xb8, 0x77, 0xd8, 0x25, 0x9a, 0x6d, 0xf5, 0x3c, 0x1e,
- 0x40, 0xe5, 0xf6, 0x8a, 0x20, 0x58, 0xd8, 0x4c, 0x76, 0xa3, 0x34, 0x1e, 0x6e, 0x80, 0xa5, 0x60,
- 0x9f, 0xfd, 0xa1, 0xee, 0x51, 0xdb, 0x3d, 0xdc, 0xd0, 0x4d, 0x9d, 0xd6, 0xa6, 0x39, 0x4f, 0x6d,
- 0x34, 0x6c, 0x2c, 0x21, 0x49, 0x3f, 0x92, 0x5a, 0xa9, 0xbf, 0x99, 0x06, 0x0b, 0xa9, 0xdd, 0x00,
- 0xde, 0x01, 0xcb, 0xda, 0xc0, 0x75, 0x89, 0x45, 0xb7, 0x06, 0xe6, 0x1e, 0x71, 0xbb, 0xda, 0x3e,
- 0xe9, 0x0d, 0x0c, 0xd2, 0xe3, 0x2b, 0x5a, 0x6e, 0xd7, 0x85, 0xaf, 0xcb, 0x1d, 0x29, 0x0a, 0xe5,
- 0x58, 0xc3, 0x1f, 0x01, 0x68, 0xf1, 0xa6, 0x4d, 0xdd, 0xf3, 0x42, 0xce, 0x02, 0xe7, 0x0c, 0x13,
- 0x70, 0x2b, 0x83, 0x40, 0x12, 0x2b, 0xe6, 0x63, 0x8f, 0x78, 0xba, 0x4b, 0x7a, 0x69, 0x1f, 0x8b,
- 0x49, 0x1f, 0xd7, 0xa5, 0x28, 0x94, 0x63, 0x0d, 0x5f, 0x06, 0x55, 0xff, 0x6d, 0x7c, 0xce, 0xc5,
- 0xe2, 0x2c, 0x0a, 0xb2, 0xea, 0x56, 0xd4, 0x85, 0xe2, 0x38, 0x36, 0x34, 0x7b, 0xcf, 0x23, 0xee,
- 0x01, 0xe9, 0xbd, 0xe5, 0x6b, 0x00, 0x56, 0x28, 0xcb, 0xbc, 0x50, 0x86, 0x43, 0xdb, 0xce, 0x20,
- 0x90, 0xc4, 0x8a, 0x0d, 0xcd, 0x8f, 0x9a, 0xcc, 0xd0, 0xa6, 0x93, 0x43, 0xdb, 0x95, 0xa2, 0x50,
- 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xb5, 0x03, 0xac, 0x1b, 0x78, 0xcf, 0x20, 0xb5, 0x99, 0x64,
- 0xec, 0x6d, 0x25, 0xbb, 0x51, 0x1a, 0x0f, 0xdf, 0x02, 0xe7, 0xfc, 0xa6, 0x5d, 0x0b, 0x87, 0x24,
- 0x15, 0x4e, 0xf2, 0x8c, 0x20, 0x39, 0xb7, 0x95, 0x06, 0xa0, 0xac, 0x0d, 0xbc, 0x05, 0xe6, 0x35,
- 0xdb, 0x30, 0x78, 0x3c, 0x76, 0xec, 0x81, 0x45, 0x6b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0xea, 0x24,
- 0x7a, 0x50, 0x0a, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x81, 0xfc, 0x42, 0x9f, 0xad,
- 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x63, 0x53, 0x3f, 0x56, 0xc0, 0x4a, 0x4e, 0x8e, 0xc3,
- 0x37, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d, 0xcc, 0x31,
- 0xdd, 0xa1, 0x5b, 0x7d, 0x1f, 0x22, 0x76, 0xb0, 0x17, 0x64, 0xbe, 0xa3, 0x38, 0x30, 0xda, 0x86,
- 0xcf, 0x8d, 0x86, 0x8d, 0xb9, 0x44, 0x1f, 0x4a, 0x72, 0xaa, 0xbf, 0x2a, 0x00, 0xb0, 0x4e, 0x1c,
- 0xc3, 0x3e, 0x34, 0x89, 0x75, 0x1a, 0xaa, 0x65, 0x3d, 0xa1, 0x5a, 0x54, 0xe9, 0x42, 0x84, 0xfe,
- 0xe4, 0xca, 0x96, 0x8d, 0x94, 0x6c, 0xb9, 0x34, 0x86, 0xe7, 0x68, 0xdd, 0xf2, 0xb7, 0x22, 0x58,
- 0x8c, 0xc0, 0x91, 0x70, 0x79, 0x2d, 0xb1, 0x84, 0x57, 0x52, 0x4b, 0xb8, 0x22, 0x31, 0x79, 0x6a,
- 0xca, 0xe5, 0x7d, 0x30, 0xcf, 0x74, 0x85, 0xbf, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5, 0x84,
- 0x55, 0x67, 0x23, 0xc1, 0x84, 0x52, 0xcc, 0x39, 0x2a, 0x69, 0xe6, 0xab, 0xa8, 0x92, 0xfe, 0xa8,
- 0x80, 0xf9, 0x68, 0x99, 0x4e, 0x41, 0x26, 0x75, 0x92, 0x32, 0xa9, 0x7e, 0x74, 0x5c, 0xe6, 0xe8,
- 0xa4, 0xbf, 0x96, 0xe2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0x8e, 0xa1, 0x6b, 0xd8, 0x13,
- 0x65, 0xf5, 0x8c, 0x7f, 0x98, 0xf2, 0xdb, 0x50, 0xd8, 0x9b, 0x90, 0x54, 0x85, 0xa7, 0x2b, 0xa9,
- 0x8a, 0x5f, 0x8c, 0xa4, 0xba, 0x0d, 0x2a, 0x5e, 0x20, 0xa6, 0x4a, 0x9c, 0xf2, 0xf2, 0xb8, 0x74,
- 0x16, 0x3a, 0x2a, 0x64, 0x0d, 0x15, 0x54, 0xc8, 0x24, 0xd3, 0x4e, 0xe5, 0x2f, 0x53, 0x3b, 0xb1,
- 0xf0, 0x76, 0xf0, 0xc0, 0x23, 0x3d, 0x9e, 0x4a, 0x95, 0x28, 0xbc, 0x77, 0x78, 0x2b, 0x12, 0xbd,
- 0x70, 0x17, 0xac, 0x38, 0xae, 0xdd, 0x77, 0x89, 0xe7, 0xad, 0x13, 0xdc, 0x33, 0x74, 0x8b, 0x04,
- 0x03, 0xf0, 0xab, 0xde, 0x85, 0xd1, 0xb0, 0xb1, 0xb2, 0x23, 0x87, 0xa0, 0x3c, 0x5b, 0xf5, 0xa3,
- 0x12, 0x38, 0x9b, 0xde, 0x11, 0x73, 0x84, 0x88, 0x72, 0x22, 0x21, 0xf2, 0x62, 0x2c, 0x44, 0x7d,
- 0x95, 0x16, 0x3b, 0xf3, 0x67, 0xc2, 0x74, 0x0d, 0x2c, 0x08, 0xe1, 0x11, 0x74, 0x0a, 0x29, 0x16,
- 0x2e, 0xcf, 0x6e, 0xb2, 0x1b, 0xa5, 0xf1, 0xf0, 0x35, 0x30, 0xe7, 0x72, 0x6d, 0x15, 0x10, 0xf8,
- 0xfa, 0xe4, 0x5b, 0x82, 0x60, 0x0e, 0xc5, 0x3b, 0x51, 0x12, 0xcb, 0xb4, 0x49, 0x24, 0x39, 0x02,
- 0x82, 0x52, 0x52, 0x9b, 0xac, 0xa5, 0x01, 0x28, 0x6b, 0x03, 0x37, 0xc1, 0xe2, 0xc0, 0xca, 0x52,
- 0xf9, 0xb1, 0x76, 0x41, 0x50, 0x2d, 0xee, 0x66, 0x21, 0x48, 0x66, 0x07, 0x7f, 0x92, 0x90, 0x2b,
- 0xd3, 0x7c, 0x17, 0xb9, 0x72, 0x74, 0x3a, 0x4c, 0xac, 0x57, 0x24, 0x3a, 0xaa, 0x32, 0xa9, 0x8e,
- 0x52, 0x3f, 0x54, 0x00, 0xcc, 0xa6, 0xe0, 0xd8, 0xc3, 0x7d, 0xc6, 0x22, 0x56, 0x22, 0x7b, 0x72,
- 0x85, 0x73, 0x75, 0xbc, 0xc2, 0x89, 0x76, 0xd0, 0xc9, 0x24, 0x8e, 0x98, 0xde, 0xd3, 0xb9, 0x98,
- 0x99, 0x40, 0xe2, 0x44, 0xfe, 0x3c, 0x99, 0xc4, 0x89, 0xf1, 0x1c, 0x2d, 0x71, 0xfe, 0x59, 0x00,
- 0x8b, 0x11, 0x78, 0x62, 0x89, 0x23, 0x31, 0xf9, 0xe6, 0x72, 0x66, 0x32, 0xd9, 0x11, 0x4d, 0xdd,
- 0xff, 0x89, 0xec, 0x88, 0x1c, 0xca, 0x91, 0x1d, 0xbf, 0x2f, 0xc4, 0xbd, 0x3e, 0xa6, 0xec, 0xf8,
- 0x02, 0xae, 0x2a, 0xbe, 0x72, 0xca, 0x45, 0xfd, 0xa4, 0x08, 0xce, 0xa6, 0x53, 0x30, 0x51, 0x07,
- 0x95, 0xb1, 0x75, 0x70, 0x07, 0x2c, 0xdd, 0x1b, 0x18, 0xc6, 0x21, 0x1f, 0x43, 0xac, 0x18, 0xfa,
- 0x15, 0xf4, 0xdb, 0xc2, 0x72, 0xe9, 0x07, 0x12, 0x0c, 0x92, 0x5a, 0x66, 0xcb, 0x62, 0xe9, 0x49,
- 0xcb, 0x62, 0xf9, 0x04, 0x65, 0x51, 0xae, 0x2c, 0x8a, 0x27, 0x52, 0x16, 0x13, 0xd7, 0x44, 0xc9,
- 0x76, 0x35, 0xf6, 0x0c, 0x3f, 0x52, 0xc0, 0xb2, 0xfc, 0xf8, 0x0c, 0x0d, 0x30, 0x6f, 0xe2, 0x07,
- 0xf1, 0xcb, 0x8b, 0x71, 0x05, 0x63, 0x40, 0x75, 0xa3, 0xe9, 0x7f, 0xdd, 0x69, 0xbe, 0x6d, 0xd1,
- 0x6d, 0xb7, 0x4b, 0x5d, 0xdd, 0xea, 0xfb, 0x05, 0x76, 0x33, 0xc1, 0x85, 0x52, 0xdc, 0xf0, 0x2e,
- 0xa8, 0x98, 0xf8, 0x41, 0x77, 0xe0, 0xf6, 0x83, 0x42, 0x78, 0xfc, 0xf7, 0xf0, 0xd8, 0xdf, 0x14,
- 0x2c, 0x28, 0xe4, 0x53, 0x3f, 0x57, 0xc0, 0x4a, 0x4e, 0x05, 0xfd, 0x1a, 0x8d, 0xf2, 0x23, 0x05,
- 0x5c, 0x4c, 0x8c, 0x92, 0x65, 0x24, 0xb9, 0x37, 0x30, 0x78, 0x72, 0x0a, 0xc1, 0x72, 0x15, 0xcc,
- 0x3a, 0xd8, 0xa5, 0x7a, 0xa8, 0x74, 0xcb, 0xed, 0xb9, 0xd1, 0xb0, 0x31, 0xbb, 0x13, 0x34, 0xa2,
- 0xa8, 0x5f, 0x32, 0x37, 0x85, 0xa7, 0x37, 0x37, 0xea, 0xaf, 0x0b, 0xa0, 0x1a, 0x73, 0xf9, 0x14,
- 0xa4, 0xca, 0x9b, 0x09, 0xa9, 0x22, 0xfd, 0xf8, 0x13, 0x9f, 0xc3, 0x3c, 0xad, 0xb2, 0x99, 0xd2,
- 0x2a, 0xdf, 0x1d, 0x47, 0x74, 0xb4, 0x58, 0xf9, 0x57, 0x01, 0x2c, 0xc5, 0xd0, 0x91, 0x5a, 0xf9,
- 0x7e, 0x42, 0xad, 0xac, 0xa6, 0xd4, 0x4a, 0x4d, 0x66, 0xf3, 0x8d, 0x5c, 0x19, 0x2f, 0x57, 0xfe,
- 0xa4, 0x80, 0x85, 0xd8, 0xdc, 0x9d, 0x82, 0x5e, 0x59, 0x4f, 0xea, 0x95, 0xc6, 0x98, 0x78, 0xc9,
- 0x11, 0x2c, 0xb7, 0xc0, 0x62, 0x0c, 0xb4, 0xed, 0xf6, 0x74, 0x0b, 0x1b, 0x1e, 0x7c, 0x0e, 0x94,
- 0x3d, 0x8a, 0x5d, 0x1a, 0x64, 0x77, 0x60, 0xdb, 0x65, 0x8d, 0xc8, 0xef, 0x53, 0xff, 0xad, 0x80,
- 0x56, 0xcc, 0x78, 0x87, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x3b, 0xb6, 0x31, 0x30, 0x49, 0xc7,
- 0xc0, 0xba, 0x89, 0x08, 0x6b, 0xd0, 0x6d, 0x6b, 0xc7, 0x36, 0x74, 0xed, 0x10, 0x62, 0x50, 0xfd,
- 0x60, 0x9f, 0x58, 0xeb, 0xc4, 0x20, 0x54, 0x7c, 0xde, 0x98, 0x6d, 0xbf, 0x11, 0xdc, 0xf6, 0xbf,
- 0x1b, 0x75, 0x3d, 0x1e, 0x36, 0x56, 0x27, 0x61, 0xe4, 0xc1, 0x19, 0xe7, 0x84, 0x3f, 0x03, 0x80,
- 0x3d, 0x76, 0x35, 0x1c, 0x7c, 0xec, 0x98, 0x6d, 0xbf, 0x1e, 0xa4, 0xf0, 0xbb, 0x61, 0xcf, 0xb1,
- 0x5e, 0x10, 0x63, 0x54, 0x7f, 0x57, 0x49, 0x2c, 0xf5, 0xd7, 0xfe, 0x6e, 0xe9, 0x17, 0x60, 0xe9,
- 0x20, 0x9a, 0x9d, 0x00, 0xc0, 0x34, 0x11, 0x8b, 0xbb, 0xe7, 0xa5, 0xf4, 0xb2, 0x79, 0x8d, 0x94,
- 0xd8, 0x1d, 0x09, 0x1d, 0x92, 0xbe, 0x04, 0xbe, 0x0c, 0xaa, 0x4c, 0xcb, 0xe8, 0x1a, 0xd9, 0xc2,
- 0x66, 0x90, 0x86, 0xe1, 0xd7, 0xa1, 0x6e, 0xd4, 0x85, 0xe2, 0x38, 0xb8, 0x0f, 0x16, 0x1d, 0xbb,
- 0xb7, 0x89, 0x2d, 0xdc, 0x27, 0xac, 0x42, 0xfb, 0x4b, 0xc9, 0x6f, 0x9d, 0x66, 0xdb, 0xaf, 0x04,
- 0x37, 0x0a, 0x3b, 0x59, 0x08, 0x3b, 0xb1, 0x49, 0x9a, 0x79, 0x10, 0xc8, 0x28, 0xa1, 0x99, 0xf9,
- 0x98, 0x39, 0x93, 0xf9, 0x07, 0x88, 0x2c, 0x1f, 0x4f, 0xf8, 0x39, 0x33, 0xef, 0x3e, 0xad, 0x72,
- 0xa2, 0xfb, 0x34, 0xc9, 0x89, 0x63, 0xf6, 0x98, 0x27, 0x8e, 0x4f, 0x14, 0x70, 0xc9, 0x99, 0x20,
- 0x8d, 0x6a, 0x80, 0x4f, 0x4b, 0x67, 0xcc, 0xb4, 0x4c, 0x92, 0x91, 0xed, 0xd5, 0xd1, 0xb0, 0x71,
- 0x69, 0x12, 0x24, 0x9a, 0xc8, 0x35, 0x96, 0x34, 0xb6, 0xd8, 0xf9, 0x6a, 0x55, 0xee, 0xe6, 0x95,
- 0x31, 0x6e, 0x06, 0x1b, 0xa5, 0x9f, 0x87, 0xc1, 0x13, 0x0a, 0x69, 0xd4, 0x0f, 0xcb, 0xe0, 0x5c,
- 0xa6, 0x5a, 0x7f, 0x89, 0x77, 0x85, 0x99, 0x13, 0x4d, 0xf1, 0x18, 0x27, 0x9a, 0x35, 0xb0, 0x20,
- 0x3e, 0x30, 0xa7, 0x0e, 0x44, 0x61, 0x98, 0x74, 0x92, 0xdd, 0x28, 0x8d, 0x97, 0xdd, 0x55, 0x96,
- 0x8f, 0x79, 0x57, 0x19, 0xf7, 0x42, 0xfc, 0x2f, 0xca, 0xcf, 0xe7, 0xac, 0x17, 0xe2, 0xef, 0x51,
- 0x69, 0x3c, 0x7c, 0x3d, 0x48, 0xd6, 0x90, 0x61, 0x86, 0x33, 0xa4, 0xb2, 0x2f, 0x24, 0x48, 0xa1,
- 0x9f, 0xe8, 0x23, 0xea, 0x7b, 0x92, 0x8f, 0xa8, 0xab, 0x63, 0xc2, 0x6c, 0xf2, 0x6b, 0x49, 0xe9,
- 0xa1, 0xb3, 0x7a, 0xfc, 0x43, 0xa7, 0xfa, 0x17, 0x05, 0x3c, 0x93, 0xbb, 0x4d, 0xc1, 0xb5, 0x84,
- 0x7a, 0xbc, 0x96, 0x52, 0x8f, 0xcf, 0xe6, 0x1a, 0xc6, 0x24, 0xa4, 0x29, 0xbf, 0xb1, 0xbc, 0x39,
- 0xf6, 0xc6, 0x52, 0x72, 0x12, 0x19, 0x7f, 0x75, 0xd9, 0x7e, 0xf5, 0xe1, 0xa3, 0xfa, 0xd4, 0xa7,
- 0x8f, 0xea, 0x53, 0x9f, 0x3d, 0xaa, 0x4f, 0xfd, 0x72, 0x54, 0x57, 0x1e, 0x8e, 0xea, 0xca, 0xa7,
- 0xa3, 0xba, 0xf2, 0xd9, 0xa8, 0xae, 0xfc, 0x7d, 0x54, 0x57, 0x7e, 0xfb, 0x79, 0x7d, 0xea, 0x2e,
- 0xcc, 0xfe, 0x2b, 0xf3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xfa, 0xed, 0x70, 0xaa, 0x29,
- 0x00, 0x00,
+ 0x15, 0xd7, 0x52, 0xa4, 0x44, 0x0d, 0x2d, 0xc9, 0x1e, 0xa9, 0x12, 0x63, 0x37, 0xa4, 0xbb, 0x71,
+ 0x6d, 0x25, 0x8e, 0xc9, 0xda, 0x71, 0x82, 0xc0, 0x29, 0x12, 0x88, 0x54, 0x9a, 0xba, 0xd1, 0x57,
+ 0x87, 0x92, 0x03, 0xb8, 0x69, 0xd1, 0xd1, 0x72, 0x4c, 0x6d, 0xbc, 0x5f, 0xd8, 0x1d, 0x2a, 0x16,
+ 0x7a, 0x29, 0x0a, 0x14, 0xe8, 0x21, 0x87, 0xfe, 0x0d, 0xfd, 0x07, 0x8a, 0xa2, 0x68, 0x6e, 0x45,
+ 0x50, 0xf4, 0xe2, 0x4b, 0x81, 0xa0, 0x97, 0xe6, 0x44, 0xd4, 0xcc, 0xa9, 0x28, 0x7a, 0x6b, 0x2f,
+ 0xbe, 0xb4, 0x98, 0xd9, 0xd9, 0xef, 0x59, 0x91, 0x92, 0x63, 0xa5, 0x09, 0x7c, 0xe3, 0xce, 0x7b,
+ 0xef, 0x37, 0x6f, 0x66, 0xde, 0x9b, 0xf7, 0x9b, 0x19, 0x02, 0xf5, 0xfe, 0xeb, 0x5e, 0x43, 0xb7,
+ 0x9b, 0xd8, 0xd1, 0x9b, 0xd8, 0x71, 0xbc, 0xe6, 0xc1, 0xf5, 0x66, 0x8f, 0x58, 0xc4, 0xc5, 0x94,
+ 0x74, 0x1b, 0x8e, 0x6b, 0x53, 0x1b, 0x42, 0x5f, 0xa7, 0x81, 0x1d, 0xbd, 0xc1, 0x74, 0x1a, 0x07,
+ 0xd7, 0xcf, 0x5f, 0xeb, 0xe9, 0x74, 0xbf, 0xbf, 0xd7, 0xd0, 0x6c, 0xb3, 0xd9, 0xb3, 0x7b, 0x76,
+ 0x93, 0xab, 0xee, 0xf5, 0xef, 0xf1, 0x2f, 0xfe, 0xc1, 0x7f, 0xf9, 0x10, 0xe7, 0xe3, 0xdd, 0x68,
+ 0xb6, 0x4b, 0x24, 0xdd, 0x9c, 0xbf, 0x19, 0xe9, 0x98, 0x58, 0xdb, 0xd7, 0x2d, 0xe2, 0x1e, 0x36,
+ 0x9d, 0xfb, 0x3d, 0xd6, 0xe0, 0x35, 0x4d, 0x42, 0xb1, 0xcc, 0xaa, 0x99, 0x67, 0xe5, 0xf6, 0x2d,
+ 0xaa, 0x9b, 0x24, 0x63, 0xf0, 0xda, 0x28, 0x03, 0x4f, 0xdb, 0x27, 0x26, 0xce, 0xd8, 0xbd, 0x92,
+ 0x67, 0xd7, 0xa7, 0xba, 0xd1, 0xd4, 0x2d, 0xea, 0x51, 0x37, 0x6d, 0xa4, 0xfe, 0x47, 0x01, 0xb0,
+ 0x6d, 0x5b, 0xd4, 0xb5, 0x0d, 0x83, 0xb8, 0x88, 0x1c, 0xe8, 0x9e, 0x6e, 0x5b, 0xf0, 0xa7, 0xa0,
+ 0xcc, 0xc6, 0xd3, 0xc5, 0x14, 0x57, 0x95, 0x8b, 0xca, 0x4a, 0xe5, 0xc6, 0x77, 0x1a, 0xd1, 0x24,
+ 0x87, 0xf0, 0x0d, 0xe7, 0x7e, 0x8f, 0x35, 0x78, 0x0d, 0xa6, 0xdd, 0x38, 0xb8, 0xde, 0xd8, 0xda,
+ 0xfb, 0x80, 0x68, 0x74, 0x83, 0x50, 0xdc, 0x82, 0x0f, 0x07, 0xf5, 0x89, 0xe1, 0xa0, 0x0e, 0xa2,
+ 0x36, 0x14, 0xa2, 0xc2, 0x2d, 0x50, 0xe4, 0xe8, 0x05, 0x8e, 0x7e, 0x2d, 0x17, 0x5d, 0x0c, 0xba,
+ 0x81, 0xf0, 0x87, 0x6f, 0x3f, 0xa0, 0xc4, 0x62, 0xee, 0xb5, 0xce, 0x08, 0xe8, 0xe2, 0x1a, 0xa6,
+ 0x18, 0x71, 0x20, 0xf8, 0x32, 0x28, 0xbb, 0xc2, 0xfd, 0xea, 0xe4, 0x45, 0x65, 0x65, 0xb2, 0x75,
+ 0x56, 0x68, 0x95, 0x83, 0x61, 0xa1, 0x50, 0x43, 0xfd, 0xb3, 0x02, 0x96, 0xb2, 0xe3, 0x5e, 0xd7,
+ 0x3d, 0x0a, 0xdf, 0xcf, 0x8c, 0xbd, 0x31, 0xde, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xd8, 0x71, 0xd0,
+ 0x12, 0x1b, 0xf7, 0xbb, 0xa0, 0xa4, 0x53, 0x62, 0x7a, 0xd5, 0xc2, 0xc5, 0xc9, 0x95, 0xca, 0x8d,
+ 0xcb, 0x8d, 0x6c, 0xec, 0x36, 0xb2, 0x8e, 0xb5, 0x66, 0x05, 0x64, 0xe9, 0x36, 0x33, 0x46, 0x3e,
+ 0x86, 0xfa, 0x5f, 0x05, 0xcc, 0xac, 0x61, 0x62, 0xda, 0x56, 0x87, 0xd0, 0x53, 0x58, 0xb4, 0x36,
+ 0x28, 0x7a, 0x0e, 0xd1, 0xc4, 0xa2, 0x7d, 0x4b, 0xe6, 0x7b, 0xe8, 0x4e, 0xc7, 0x21, 0x5a, 0xb4,
+ 0x50, 0xec, 0x0b, 0x71, 0x63, 0xf8, 0x2e, 0x98, 0xf2, 0x28, 0xa6, 0x7d, 0x8f, 0x2f, 0x53, 0xe5,
+ 0xc6, 0x0b, 0x47, 0xc3, 0x70, 0xd5, 0xd6, 0x9c, 0x00, 0x9a, 0xf2, 0xbf, 0x91, 0x80, 0x50, 0xff,
+ 0x51, 0x00, 0x30, 0xd4, 0x6d, 0xdb, 0x56, 0x57, 0xa7, 0x2c, 0x7e, 0x6f, 0x81, 0x22, 0x3d, 0x74,
+ 0x08, 0x9f, 0x86, 0x99, 0xd6, 0xe5, 0xc0, 0x8b, 0x9d, 0x43, 0x87, 0x3c, 0x1e, 0xd4, 0x97, 0xb2,
+ 0x16, 0x4c, 0x82, 0xb8, 0x0d, 0x5c, 0x0f, 0xfd, 0x2b, 0x70, 0xeb, 0x9b, 0xc9, 0xae, 0x1f, 0x0f,
+ 0xea, 0x92, 0xcd, 0xa2, 0x11, 0x22, 0x25, 0x1d, 0x84, 0x07, 0x00, 0x1a, 0xd8, 0xa3, 0x3b, 0x2e,
+ 0xb6, 0x3c, 0xbf, 0x27, 0xdd, 0x24, 0x62, 0xe4, 0x2f, 0x8d, 0xb7, 0x3c, 0xcc, 0xa2, 0x75, 0x5e,
+ 0x78, 0x01, 0xd7, 0x33, 0x68, 0x48, 0xd2, 0x03, 0xbc, 0x0c, 0xa6, 0x5c, 0x82, 0x3d, 0xdb, 0xaa,
+ 0x16, 0xf9, 0x28, 0xc2, 0x09, 0x44, 0xbc, 0x15, 0x09, 0x29, 0x7c, 0x11, 0x4c, 0x9b, 0xc4, 0xf3,
+ 0x70, 0x8f, 0x54, 0x4b, 0x5c, 0x71, 0x5e, 0x28, 0x4e, 0x6f, 0xf8, 0xcd, 0x28, 0x90, 0xab, 0xbf,
+ 0x53, 0xc0, 0x6c, 0x38, 0x73, 0xa7, 0x90, 0x2a, 0xad, 0x64, 0xaa, 0x3c, 0x7f, 0x64, 0x9c, 0xe4,
+ 0x64, 0xc8, 0x27, 0x93, 0x31, 0x9f, 0x59, 0x10, 0xc2, 0x1f, 0x83, 0xb2, 0x47, 0x0c, 0xa2, 0x51,
+ 0xdb, 0x15, 0x3e, 0xbf, 0x32, 0xa6, 0xcf, 0x78, 0x8f, 0x18, 0x1d, 0x61, 0xda, 0x3a, 0xc3, 0x9c,
+ 0x0e, 0xbe, 0x50, 0x08, 0x09, 0x7f, 0x08, 0xca, 0x94, 0x98, 0x8e, 0x81, 0x29, 0x11, 0x69, 0x92,
+ 0x88, 0x6f, 0x16, 0x2e, 0x0c, 0x6c, 0xdb, 0xee, 0xee, 0x08, 0x35, 0x9e, 0x28, 0xe1, 0x3c, 0x04,
+ 0xad, 0x28, 0x84, 0x81, 0xf7, 0xc1, 0x5c, 0xdf, 0xe9, 0x32, 0x4d, 0xca, 0xb6, 0xee, 0xde, 0xa1,
+ 0x08, 0x9f, 0xab, 0x47, 0x4e, 0xc8, 0x6e, 0xc2, 0xa4, 0xb5, 0x24, 0x3a, 0x98, 0x4b, 0xb6, 0xa3,
+ 0x14, 0x34, 0x5c, 0x05, 0xf3, 0xa6, 0x6e, 0x21, 0x82, 0xbb, 0x87, 0x1d, 0xa2, 0xd9, 0x56, 0xd7,
+ 0xe3, 0x01, 0x54, 0x6a, 0x2d, 0x0b, 0x80, 0xf9, 0x8d, 0xa4, 0x18, 0xa5, 0xf5, 0xe1, 0x3a, 0x58,
+ 0x0c, 0xf6, 0xd9, 0xef, 0xeb, 0x1e, 0xb5, 0xdd, 0xc3, 0x75, 0xdd, 0xd4, 0x69, 0x75, 0x8a, 0xe3,
+ 0x54, 0x87, 0x83, 0xfa, 0x22, 0x92, 0xc8, 0x91, 0xd4, 0x4a, 0xfd, 0x68, 0x0a, 0xcc, 0xa7, 0x76,
+ 0x03, 0x78, 0x07, 0x2c, 0x69, 0x7d, 0xd7, 0x25, 0x16, 0xdd, 0xec, 0x9b, 0x7b, 0xc4, 0xed, 0x68,
+ 0xfb, 0xa4, 0xdb, 0x37, 0x48, 0x97, 0xaf, 0x68, 0xa9, 0x55, 0x13, 0xbe, 0x2e, 0xb5, 0xa5, 0x5a,
+ 0x28, 0xc7, 0x1a, 0xfe, 0x00, 0x40, 0x8b, 0x37, 0x6d, 0xe8, 0x9e, 0x17, 0x62, 0x16, 0x38, 0x66,
+ 0x98, 0x80, 0x9b, 0x19, 0x0d, 0x24, 0xb1, 0x62, 0x3e, 0x76, 0x89, 0xa7, 0xbb, 0xa4, 0x9b, 0xf6,
+ 0x71, 0x32, 0xe9, 0xe3, 0x9a, 0x54, 0x0b, 0xe5, 0x58, 0xc3, 0x57, 0x41, 0xc5, 0xef, 0x8d, 0xcf,
+ 0xb9, 0x58, 0x9c, 0x05, 0x01, 0x56, 0xd9, 0x8c, 0x44, 0x28, 0xae, 0xc7, 0x86, 0x66, 0xef, 0x79,
+ 0xc4, 0x3d, 0x20, 0xdd, 0x77, 0x7c, 0x0e, 0xc0, 0x0a, 0x65, 0x89, 0x17, 0xca, 0x70, 0x68, 0x5b,
+ 0x19, 0x0d, 0x24, 0xb1, 0x62, 0x43, 0xf3, 0xa3, 0x26, 0x33, 0xb4, 0xa9, 0xe4, 0xd0, 0x76, 0xa5,
+ 0x5a, 0x28, 0xc7, 0x9a, 0xc5, 0x9e, 0xef, 0xf2, 0xea, 0x01, 0xd6, 0x0d, 0xbc, 0x67, 0x90, 0xea,
+ 0x74, 0x32, 0xf6, 0x36, 0x93, 0x62, 0x94, 0xd6, 0x87, 0xef, 0x80, 0x73, 0x7e, 0xd3, 0xae, 0x85,
+ 0x43, 0x90, 0x32, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xdb, 0x4c, 0x2b, 0xa0, 0xac, 0x0d, 0xbc, 0x05,
+ 0xe6, 0x34, 0xdb, 0x30, 0x78, 0x3c, 0xb6, 0xed, 0xbe, 0x45, 0xab, 0x33, 0x1c, 0x05, 0xb2, 0x1c,
+ 0x6a, 0x27, 0x24, 0x28, 0xa5, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x82, 0xfc, 0x42,
+ 0x9f, 0xad, 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x43, 0x53, 0x3f, 0x51, 0xc0, 0x72, 0x4e,
+ 0x8e, 0xc3, 0xb7, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d,
+ 0xcc, 0x32, 0xde, 0xa1, 0x5b, 0x3d, 0x5f, 0x45, 0xec, 0x60, 0x2f, 0xc9, 0x7c, 0x47, 0x71, 0xc5,
+ 0x68, 0x1b, 0x3e, 0x37, 0x1c, 0xd4, 0x67, 0x13, 0x32, 0x94, 0xc4, 0x54, 0x7f, 0x51, 0x00, 0x60,
+ 0x8d, 0x38, 0x86, 0x7d, 0x68, 0x12, 0xeb, 0x34, 0x58, 0xcb, 0x5a, 0x82, 0xb5, 0xa8, 0xd2, 0x85,
+ 0x08, 0xfd, 0xc9, 0xa5, 0x2d, 0xeb, 0x29, 0xda, 0x72, 0x69, 0x04, 0xce, 0xd1, 0xbc, 0xe5, 0x6f,
+ 0x93, 0x60, 0x21, 0x52, 0x8e, 0x88, 0xcb, 0x1b, 0x89, 0x25, 0xbc, 0x92, 0x5a, 0xc2, 0x65, 0x89,
+ 0xc9, 0x53, 0x63, 0x2e, 0x1f, 0x80, 0x39, 0xc6, 0x2b, 0xfc, 0x55, 0xe3, 0xac, 0x65, 0xea, 0xd8,
+ 0xac, 0x25, 0xac, 0x3a, 0xeb, 0x09, 0x24, 0x94, 0x42, 0xce, 0x61, 0x49, 0xd3, 0x5f, 0x45, 0x96,
+ 0xf4, 0x7b, 0x05, 0xcc, 0x45, 0xcb, 0x74, 0x0a, 0x34, 0xa9, 0x9d, 0xa4, 0x49, 0xb5, 0xa3, 0xe3,
+ 0x32, 0x87, 0x27, 0xfd, 0xb5, 0x18, 0xf7, 0x9a, 0x13, 0xa5, 0x15, 0x76, 0xa0, 0x72, 0x0c, 0x5d,
+ 0xc3, 0x9e, 0x28, 0xab, 0x67, 0xfc, 0xc3, 0x94, 0xdf, 0x86, 0x42, 0x69, 0x82, 0x52, 0x15, 0x9e,
+ 0x2e, 0xa5, 0x9a, 0xfc, 0x62, 0x28, 0xd5, 0x0e, 0x28, 0x7b, 0x01, 0x99, 0x2a, 0x72, 0xc8, 0xcb,
+ 0xa3, 0xd2, 0x59, 0xf0, 0xa8, 0x10, 0x35, 0x64, 0x50, 0x21, 0x92, 0x8c, 0x3b, 0x95, 0xbe, 0x4c,
+ 0xee, 0xc4, 0xc2, 0xdb, 0xc1, 0x7d, 0x8f, 0x74, 0x79, 0x2a, 0x95, 0xa3, 0xf0, 0xde, 0xe6, 0xad,
+ 0x48, 0x48, 0xe1, 0x2e, 0x58, 0x76, 0x5c, 0xbb, 0xe7, 0x12, 0xcf, 0x5b, 0x23, 0xb8, 0x6b, 0xe8,
+ 0x16, 0x09, 0x06, 0xe0, 0x57, 0xbd, 0x0b, 0xc3, 0x41, 0x7d, 0x79, 0x5b, 0xae, 0x82, 0xf2, 0x6c,
+ 0xd5, 0x5f, 0x95, 0xc0, 0xd9, 0xf4, 0x8e, 0x98, 0x43, 0x44, 0x94, 0x13, 0x11, 0x91, 0x97, 0x63,
+ 0x21, 0xea, 0xb3, 0xb4, 0xd8, 0x99, 0x3f, 0x13, 0xa6, 0xab, 0x60, 0x5e, 0x10, 0x8f, 0x40, 0x28,
+ 0xa8, 0x58, 0xb8, 0x3c, 0xbb, 0x49, 0x31, 0x4a, 0xeb, 0xc3, 0x37, 0xc0, 0xac, 0xcb, 0xb9, 0x55,
+ 0x00, 0xe0, 0xf3, 0x93, 0x6f, 0x08, 0x80, 0x59, 0x14, 0x17, 0xa2, 0xa4, 0x2e, 0xe3, 0x26, 0x11,
+ 0xe5, 0x08, 0x00, 0x8a, 0x49, 0x6e, 0xb2, 0x9a, 0x56, 0x40, 0x59, 0x1b, 0xb8, 0x01, 0x16, 0xfa,
+ 0x56, 0x16, 0xca, 0x8f, 0xb5, 0x0b, 0x02, 0x6a, 0x61, 0x37, 0xab, 0x82, 0x64, 0x76, 0xf0, 0x36,
+ 0x58, 0xa0, 0xc4, 0x35, 0x75, 0x0b, 0x53, 0xdd, 0xea, 0x85, 0x70, 0xfe, 0xca, 0x2f, 0x33, 0xa8,
+ 0x9d, 0xac, 0x18, 0xc9, 0x6c, 0xe0, 0x8f, 0x12, 0xcc, 0x67, 0x8a, 0x6f, 0x48, 0x57, 0x8e, 0xce,
+ 0xac, 0xb1, 0xa9, 0x8f, 0x84, 0x92, 0x95, 0xc7, 0xa5, 0x64, 0xea, 0xc7, 0x0a, 0x80, 0xd9, 0x6c,
+ 0x1e, 0x79, 0x4f, 0x90, 0xb1, 0x88, 0x55, 0xdb, 0xae, 0x9c, 0x2c, 0x5d, 0x1d, 0x4d, 0x96, 0xa2,
+ 0xcd, 0x78, 0x3c, 0xb6, 0x24, 0xa6, 0xf7, 0x74, 0xee, 0x78, 0xc6, 0x60, 0x4b, 0x91, 0x3f, 0x4f,
+ 0xc6, 0x96, 0x62, 0x38, 0x47, 0xb3, 0xa5, 0x7f, 0x16, 0xc0, 0x42, 0xa4, 0x3c, 0x36, 0x5b, 0x92,
+ 0x98, 0x3c, 0xbb, 0xe7, 0x19, 0x8f, 0xc1, 0x44, 0x53, 0xf7, 0x7f, 0xc2, 0x60, 0x22, 0x87, 0x72,
+ 0x18, 0xcc, 0x6f, 0x0b, 0x71, 0xaf, 0x8f, 0xc9, 0x60, 0xbe, 0x80, 0x5b, 0x8f, 0xaf, 0x1c, 0x09,
+ 0x52, 0x3f, 0x2a, 0x82, 0xb3, 0xe9, 0x14, 0x4c, 0x94, 0x54, 0x65, 0x64, 0x49, 0xdd, 0x06, 0x8b,
+ 0xf7, 0xfa, 0x86, 0x71, 0xc8, 0xc7, 0x10, 0xab, 0xab, 0x7e, 0x31, 0xfe, 0xa6, 0xb0, 0x5c, 0xfc,
+ 0x9e, 0x44, 0x07, 0x49, 0x2d, 0xb3, 0x15, 0xb6, 0xf8, 0xa4, 0x15, 0xb6, 0x74, 0x82, 0x0a, 0x9b,
+ 0x53, 0x12, 0xa7, 0x4f, 0x50, 0x12, 0xe5, 0x7c, 0x67, 0xf2, 0x44, 0x7c, 0x67, 0xec, 0xf2, 0x2a,
+ 0xd9, 0xf9, 0x46, 0xde, 0x2c, 0x0c, 0x15, 0xb0, 0x24, 0x3f, 0xd4, 0x43, 0x03, 0xcc, 0x99, 0xf8,
+ 0x41, 0xfc, 0x4a, 0x65, 0x54, 0xed, 0xe9, 0x53, 0xdd, 0x68, 0xf8, 0x6f, 0x4e, 0x8d, 0xdb, 0x16,
+ 0xdd, 0x72, 0x3b, 0xd4, 0xd5, 0xad, 0x9e, 0x5f, 0xab, 0x37, 0x12, 0x58, 0x28, 0x85, 0x0d, 0xef,
+ 0x82, 0xb2, 0x89, 0x1f, 0x74, 0xfa, 0x6e, 0x2f, 0xa8, 0xa9, 0xc7, 0xef, 0x87, 0xa7, 0xd1, 0x86,
+ 0x40, 0x41, 0x21, 0x9e, 0xfa, 0xb9, 0x02, 0x96, 0x73, 0x8a, 0xf1, 0xd7, 0x68, 0x94, 0x7f, 0x54,
+ 0xc0, 0xc5, 0xc4, 0x28, 0x59, 0x72, 0x93, 0x7b, 0x7d, 0x83, 0xe7, 0xb9, 0xe0, 0x3e, 0x57, 0xc1,
+ 0x8c, 0x83, 0x5d, 0xaa, 0x87, 0xfc, 0xbb, 0xd4, 0x9a, 0x1d, 0x0e, 0xea, 0x33, 0xdb, 0x41, 0x23,
+ 0x8a, 0xe4, 0x92, 0xb9, 0x29, 0x3c, 0xbd, 0xb9, 0x51, 0x7f, 0x59, 0x00, 0x95, 0x98, 0xcb, 0xa7,
+ 0xc0, 0x7a, 0xde, 0x4e, 0xb0, 0x1e, 0xe9, 0x93, 0x54, 0x7c, 0x0e, 0xf3, 0x68, 0xcf, 0x46, 0x8a,
+ 0xf6, 0x7c, 0x7b, 0x14, 0xd0, 0xd1, 0xbc, 0xe7, 0x5f, 0x05, 0xb0, 0x18, 0xd3, 0x8e, 0x88, 0xcf,
+ 0x77, 0x13, 0xc4, 0x67, 0x25, 0x45, 0x7c, 0xaa, 0x32, 0x9b, 0x67, 0xcc, 0x67, 0x34, 0xf3, 0xf9,
+ 0x83, 0x02, 0xe6, 0x63, 0x73, 0x77, 0x0a, 0xd4, 0x67, 0x2d, 0x49, 0x7d, 0xea, 0x23, 0xe2, 0x25,
+ 0x87, 0xfb, 0xdc, 0x02, 0x0b, 0x31, 0xa5, 0x2d, 0xb7, 0xab, 0x5b, 0xd8, 0xf0, 0xe0, 0x0b, 0xa0,
+ 0xe4, 0x51, 0xec, 0xd2, 0x20, 0xbb, 0x03, 0xdb, 0x0e, 0x6b, 0x44, 0xbe, 0x4c, 0xfd, 0xb7, 0x02,
+ 0x9a, 0x31, 0xe3, 0x6d, 0xe2, 0x7a, 0xba, 0x47, 0x89, 0x45, 0xef, 0xd8, 0x46, 0xdf, 0x24, 0x6d,
+ 0x03, 0xeb, 0x26, 0x22, 0xac, 0x41, 0xb7, 0xad, 0x6d, 0xdb, 0xd0, 0xb5, 0x43, 0x88, 0x41, 0xe5,
+ 0xc3, 0x7d, 0x62, 0xad, 0x11, 0x83, 0x50, 0xf1, 0xe8, 0x32, 0xd3, 0x7a, 0x2b, 0x78, 0x83, 0x78,
+ 0x2f, 0x12, 0x3d, 0x1e, 0xd4, 0x57, 0xc6, 0x41, 0xe4, 0xc1, 0x19, 0xc7, 0x84, 0x3f, 0x01, 0x80,
+ 0x7d, 0x76, 0x34, 0x1c, 0x3c, 0xc1, 0xcc, 0xb4, 0xde, 0x0c, 0x52, 0xf8, 0xbd, 0x50, 0x72, 0xac,
+ 0x0e, 0x62, 0x88, 0xea, 0x6f, 0xca, 0x89, 0xa5, 0xfe, 0xda, 0xdf, 0x78, 0xfd, 0x0c, 0x2c, 0x1e,
+ 0x44, 0xb3, 0x13, 0x28, 0x30, 0x7a, 0xc5, 0xe2, 0xee, 0x45, 0x29, 0xbc, 0x6c, 0x5e, 0x23, 0x52,
+ 0x77, 0x47, 0x02, 0x87, 0xa4, 0x9d, 0xc0, 0x57, 0x41, 0x85, 0x71, 0x19, 0x5d, 0x23, 0x9b, 0xd8,
+ 0x0c, 0xd2, 0x30, 0x7c, 0xb3, 0xea, 0x44, 0x22, 0x14, 0xd7, 0x83, 0xfb, 0x60, 0xc1, 0xb1, 0xbb,
+ 0x1b, 0xd8, 0xc2, 0x3d, 0xc2, 0x2a, 0xb4, 0xbf, 0x94, 0xfc, 0x2e, 0x6c, 0xa6, 0xf5, 0x5a, 0x70,
+ 0xcf, 0xb1, 0x9d, 0x55, 0x61, 0x87, 0x3f, 0x49, 0x33, 0x0f, 0x02, 0x19, 0x24, 0x34, 0x33, 0x4f,
+ 0xac, 0xd3, 0x99, 0xff, 0xa5, 0xc8, 0xf2, 0xf1, 0x84, 0x8f, 0xac, 0x79, 0xb7, 0x7c, 0xe5, 0x13,
+ 0xdd, 0xf2, 0x49, 0x0e, 0x2f, 0x33, 0xc7, 0x3c, 0xbc, 0xfc, 0x49, 0x01, 0x97, 0x9c, 0x31, 0xd2,
+ 0xa8, 0x0a, 0xf8, 0xb4, 0xb4, 0x47, 0x4c, 0xcb, 0x38, 0x19, 0xd9, 0x5a, 0x19, 0x0e, 0xea, 0x97,
+ 0xc6, 0xd1, 0x44, 0x63, 0xb9, 0xc6, 0x92, 0xc6, 0x16, 0x3b, 0x5f, 0xb5, 0xc2, 0xdd, 0xbc, 0x32,
+ 0xc2, 0xcd, 0x60, 0xa3, 0xf4, 0xf3, 0x30, 0xf8, 0x42, 0x21, 0x8c, 0xfa, 0x71, 0x09, 0x9c, 0xcb,
+ 0x54, 0xeb, 0x2f, 0xf1, 0x06, 0x33, 0x73, 0x38, 0x9a, 0x3c, 0xc6, 0xe1, 0x68, 0x15, 0xcc, 0x8b,
+ 0x67, 0xef, 0xd4, 0xd9, 0x2a, 0x0c, 0x93, 0x76, 0x52, 0x8c, 0xd2, 0xfa, 0xb2, 0x1b, 0xd4, 0xd2,
+ 0x31, 0x6f, 0x50, 0xe3, 0x5e, 0x88, 0x7f, 0x6b, 0xf9, 0xf9, 0x9c, 0xf5, 0x42, 0xfc, 0x69, 0x2b,
+ 0xad, 0x0f, 0xdf, 0x0c, 0x92, 0x35, 0x44, 0x98, 0xe6, 0x08, 0xa9, 0xec, 0x0b, 0x01, 0x52, 0xda,
+ 0x4f, 0xf4, 0xb4, 0xfb, 0xbe, 0xe4, 0x69, 0x77, 0x65, 0x44, 0x98, 0x8d, 0x7f, 0xc3, 0x29, 0x3d,
+ 0xbf, 0x56, 0x8e, 0x7f, 0x7e, 0x55, 0xff, 0xa2, 0x80, 0xe7, 0x72, 0xb7, 0x29, 0xb8, 0x9a, 0x60,
+ 0x8f, 0xd7, 0x52, 0xec, 0xf1, 0xf9, 0x5c, 0xc3, 0x18, 0x85, 0x34, 0xe5, 0x97, 0x9f, 0x37, 0x47,
+ 0x5e, 0x7e, 0x4a, 0x4e, 0x22, 0xa3, 0x6f, 0x41, 0x5b, 0xaf, 0x3f, 0x7c, 0x54, 0x9b, 0xf8, 0xf4,
+ 0x51, 0x6d, 0xe2, 0xb3, 0x47, 0xb5, 0x89, 0x9f, 0x0f, 0x6b, 0xca, 0xc3, 0x61, 0x4d, 0xf9, 0x74,
+ 0x58, 0x53, 0x3e, 0x1b, 0xd6, 0x94, 0xbf, 0x0f, 0x6b, 0xca, 0xaf, 0x3f, 0xaf, 0x4d, 0xdc, 0x85,
+ 0xd9, 0xff, 0x8a, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0a, 0xea, 0xf9, 0x40, 0x2a, 0x00,
+ 0x00,
}
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -1748,6 +1750,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.TerminatingReplicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+ i--
+ dAtA[i] = 0x48
+ }
if m.CollisionCount != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
i--
@@ -2054,6 +2061,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.TerminatingReplicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+ i--
+ dAtA[i] = 0x38
+ }
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -2915,6 +2927,9 @@ func (m *DeploymentStatus) Size() (n int) {
if m.CollisionCount != nil {
n += 1 + sovGenerated(uint64(*m.CollisionCount))
}
+ if m.TerminatingReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+ }
return n
}
@@ -3020,6 +3035,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.TerminatingReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+ }
return n
}
@@ -3435,6 +3453,7 @@ func (this *DeploymentStatus) String() string {
`Conditions:` + repeatedStringForConditions + `,`,
`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
`}`,
}, "")
return s
@@ -3521,6 +3540,7 @@ func (this *ReplicaSetStatus) String() string {
`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
+ `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
`}`,
}, "")
return s
@@ -5941,6 +5961,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
}
}
m.CollisionCount = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminatingReplicas = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6873,6 +6913,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminatingReplicas = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
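The TerminatingReplicas hunks above hand-roll protobuf varint framing: the generated marshaler writes its buffer back-to-front, so the value varint is emitted before the tag byte — 0x48 for DeploymentStatus (field 9, wire type 0) and 0x38 for ReplicaSetStatus (field 7) — while the Unmarshal loops accumulate seven payload bits per byte until the continuation bit clears. A standalone sketch of that wire format (shown tag-then-value, the order it takes on the wire):

package main

import "fmt"

// putVarint appends v in base-128 varint form: low seven bits per byte, high
// bit set while more bytes follow — the framing the hunks above implement.
func putVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	const fieldNum, wireVarint = 9, 0 // DeploymentStatus.terminatingReplicas
	tag := byte(fieldNum<<3 | wireVarint)
	fmt.Printf("tag=%#x\n", tag) // 0x48, matching `dAtA[i] = 0x48` above

	buf := putVarint([]byte{tag}, 300) // 0x48 0xac 0x02

	// Decode with the same shift/accumulate loop as the Unmarshal cases above.
	var v int32
	for i, shift := 1, uint(0); ; shift += 7 {
		b := buf[i]
		i++
		v |= int32(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	fmt.Println(v) // 300
}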
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
index 388e638f4..38c8997e9 100644
--- a/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -318,19 +318,19 @@ message DeploymentStatus {
// +optional
optional int64 observedGeneration = 1;
- // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
optional int32 replicas = 2;
- // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
optional int32 updatedReplicas = 3;
- // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
optional int32 readyReplicas = 7;
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
optional int32 availableReplicas = 4;
@@ -340,6 +340,13 @@ message DeploymentStatus {
// +optional
optional int32 unavailableReplicas = 5;
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ optional int32 terminatingReplicas = 9;
+
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
@@ -421,16 +428,16 @@ message ReplicaSetList {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of ReplicaSets.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
repeated ReplicaSet items = 2;
}
// ReplicaSetSpec is the specification of a ReplicaSet.
message ReplicaSetSpec {
- // Replicas is the number of desired replicas.
+ // Replicas is the number of desired pods.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
// +optional
optional int32 replicas = 1;
@@ -448,29 +455,36 @@ message ReplicaSetSpec {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
// +optional
optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
message ReplicaSetStatus {
- // Replicas is the most recently observed number of replicas.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
optional int32 replicas = 1;
- // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
// +optional
optional int32 fullyLabeledReplicas = 2;
- // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
// +optional
optional int32 readyReplicas = 4;
- // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
// +optional
optional int32 availableReplicas = 5;
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ optional int32 terminatingReplicas = 7;
+
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
optional int64 observedGeneration = 3;
@@ -702,6 +716,7 @@ message StatefulSetSpec {
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
+ // +optional
optional string serviceName = 5;
// podManagementPolicy controls how pods are created during initial scale up,
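
Because `terminatingReplicas` is optional and alpha-gated (DeploymentReplicaSetTerminatingReplicas), clients observe a nil pointer whenever the gate is off or the server predates the field. A hedged sketch of defensive client code — `terminatingOrZero` is a hypothetical helper, not part of the API, and it assumes the patched `k8s.io/api` shown in this diff:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// terminatingOrZero treats an unset terminatingReplicas (feature gate
// off, or an older API server) as zero instead of dereferencing nil.
func terminatingOrZero(st appsv1.DeploymentStatus) int32 {
	if st.TerminatingReplicas == nil {
		return 0
	}
	return *st.TerminatingReplicas
}

func main() {
	var st appsv1.DeploymentStatus
	fmt.Println(terminatingOrZero(st)) // 0: field unset

	n := int32(2)
	st.TerminatingReplicas = &n
	fmt.Println(terminatingOrZero(st)) // 2
}
```
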
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
index a68690b44..1362d875d 100644
--- a/vendor/k8s.io/api/apps/v1/types.go
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -220,6 +220,7 @@ type StatefulSetSpec struct {
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
+ // +optional
ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
// podManagementPolicy controls how pods are created during initial scale up,
@@ -486,19 +487,19 @@ type DeploymentStatus struct {
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
- // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
- // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
- // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
@@ -508,6 +509,13 @@ type DeploymentStatus struct {
// +optional
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
+
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
@@ -839,16 +847,16 @@ type ReplicaSetList struct {
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ReplicaSets.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpec struct {
- // Replicas is the number of desired replicas.
+ // Replicas is the number of desired pods.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
@@ -866,29 +874,36 @@ type ReplicaSetSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
// +optional
Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatus struct {
- // Replicas is the most recently observed number of replicas.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
- // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
// +optional
FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
- // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
- // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
+
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
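
The updated comment on `ReplicaSetSpec.Replicas` spells out why the field is `*int32`: a nil pointer means "unspecified, default to 1", while a pointer to zero is an explicit scale-to-zero. A quick illustration, assuming the `k8s.io/api` module is available:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	zero := int32(0)

	// Explicit zero: scale the ReplicaSet down to no pods.
	explicit := appsv1.ReplicaSetSpec{Replicas: &zero}

	// Unspecified: the nil pointer lets defaulting apply "Defaults to 1".
	unspecified := appsv1.ReplicaSetSpec{}

	fmt.Println(explicit.Replicas == nil, unspecified.Replicas == nil) // false true
}
```
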
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index 341ecdadb..f44ba7bc3 100644
--- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -177,11 +177,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
var map_DeploymentStatus = map[string]string{
"": "DeploymentStatus is the most recently observed status of the Deployment.",
"observedGeneration": "The generation observed by the deployment controller.",
- "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
- "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
- "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.",
- "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "replicas": "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
+ "readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
+ "availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
"conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
@@ -227,7 +228,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
var map_ReplicaSetList = map[string]string{
"": "ReplicaSetList is a collection of ReplicaSets.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+ "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
}
func (ReplicaSetList) SwaggerDoc() map[string]string {
@@ -236,10 +237,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
var map_ReplicaSetSpec = map[string]string{
"": "ReplicaSetSpec is the specification of a ReplicaSet.",
- "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "replicas": "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
"selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
- "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
}
func (ReplicaSetSpec) SwaggerDoc() map[string]string {
@@ -248,10 +249,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
var map_ReplicaSetStatus = map[string]string{
"": "ReplicaSetStatus represents the current status of a ReplicaSet.",
- "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
- "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
- "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.",
- "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+ "replicas": "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
+ "fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
+ "readyReplicas": "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
+ "availableReplicas": "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
+ "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
"observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
"conditions": "Represents the latest available observations of a replica set's current state.",
}
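
The `SwaggerDoc` maps pair each JSON field name with its doc string, with the empty key describing the type itself; OpenAPI tooling reads them to populate field descriptions. A toy consumer of that shape, sketched here with shortened strings:

```go
package main

import "fmt"

func main() {
	// Same shape as map_ReplicaSetStatus above: "" documents the type,
	// every other key matches a JSON field name from types.go.
	doc := map[string]string{
		"":                    "ReplicaSetStatus represents the current status of a ReplicaSet.",
		"terminatingReplicas": "The number of terminating pods for this replica set...",
	}
	fmt.Println(doc[""])
	fmt.Println(doc["terminatingReplicas"])
}
```
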
diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
index 6912986ac..9e67658ba 100644
--- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
@@ -363,6 +363,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
+ if in.TerminatingReplicas != nil {
+ in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+ *out = new(int32)
+ **out = **in
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
@@ -517,6 +522,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
*out = *in
+ if in.TerminatingReplicas != nil {
+ in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+ *out = new(int32)
+ **out = **in
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicaSetCondition, len(*in))
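
Both `DeepCopyInto` hunks allocate fresh storage for `TerminatingReplicas` instead of copying the pointer, so the copy never aliases the source object. The idiom in isolation, as a standalone sketch:

```go
package main

import "fmt"

// copyInt32Ptr shows the idiom the generated DeepCopyInto uses for
// optional scalar fields: allocate new storage and copy the value, so
// mutating the copy never writes through to the original.
func copyInt32Ptr(in *int32) *int32 {
	if in == nil {
		return nil
	}
	out := new(int32)
	*out = *in
	return out
}

func main() {
	n := int32(4)
	c := copyInt32Ptr(&n)
	*c = 9
	fmt.Println(n, *c) // 4 9 — the original is untouched
}
```
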
diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go
index 38a358551..7770fab5d 100644
--- a/vendor/k8s.io/api/apps/v1beta1/doc.go
+++ b/vendor/k8s.io/api/apps/v1beta1/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1beta1 // import "k8s.io/api/apps/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
index 76e755b4a..ae84aaf48 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
@@ -728,134 +728,135 @@ func init() {
}
var fileDescriptor_2747f709ac7c95e7 = []byte{
- // 2018 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7,
- 0x15, 0xf7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x25, 0x61, 0x63,
- 0xc4, 0x4a, 0x62, 0x2f, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x75, 0x21, 0x4a, 0x6e, 0xec, 0x40, 0x8a,
- 0x94, 0x91, 0x64, 0xa3, 0xe9, 0x07, 0x32, 0x22, 0xc7, 0xd4, 0x46, 0xfb, 0x85, 0xdd, 0x21, 0x63,
- 0xa2, 0x97, 0xfe, 0x01, 0x05, 0xd2, 0x73, 0xff, 0x8a, 0xf6, 0xd4, 0xa2, 0x45, 0x2f, 0x3d, 0x14,
- 0x3e, 0x06, 0xbd, 0x34, 0x27, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec,
- 0xec, 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xcd, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6,
- 0xbd, 0x37, 0xef, 0x0d, 0xe1, 0xfa, 0xe9, 0x7b, 0x9e, 0xa6, 0xdb, 0x4d, 0xe2, 0xe8, 0x4d, 0xe2,
- 0x38, 0x5e, 0xb3, 0x7f, 0xeb, 0x98, 0x32, 0x72, 0xab, 0xd9, 0xa5, 0x16, 0x75, 0x09, 0xa3, 0x1d,
- 0xcd, 0x71, 0x6d, 0x66, 0xa3, 0x25, 0x9f, 0x51, 0x23, 0x8e, 0xae, 0x71, 0x46, 0x4d, 0x32, 0x2e,
- 0xdf, 0xec, 0xea, 0xec, 0xa4, 0x77, 0xac, 0xb5, 0x6d, 0xb3, 0xd9, 0xb5, 0xbb, 0x76, 0x53, 0xf0,
- 0x1f, 0xf7, 0x1e, 0x8b, 0x2f, 0xf1, 0x21, 0x7e, 0xf9, 0x38, 0xcb, 0x6a, 0x4c, 0x61, 0xdb, 0x76,
- 0x69, 0xb3, 0x9f, 0xd1, 0xb5, 0xfc, 0x4e, 0xc4, 0x63, 0x92, 0xf6, 0x89, 0x6e, 0x51, 0x77, 0xd0,
- 0x74, 0x4e, 0xbb, 0x7c, 0xc1, 0x6b, 0x9a, 0x94, 0x91, 0x3c, 0xa9, 0x66, 0x91, 0x94, 0xdb, 0xb3,
- 0x98, 0x6e, 0xd2, 0x8c, 0xc0, 0xbb, 0xe7, 0x09, 0x78, 0xed, 0x13, 0x6a, 0x92, 0x8c, 0xdc, 0xdb,
- 0x45, 0x72, 0x3d, 0xa6, 0x1b, 0x4d, 0xdd, 0x62, 0x1e, 0x73, 0xd3, 0x42, 0xea, 0xbf, 0x15, 0x40,
- 0x5b, 0xb6, 0xc5, 0x5c, 0xdb, 0x30, 0xa8, 0x8b, 0x69, 0x5f, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42,
- 0x8d, 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0x6b, 0xca, 0xfa, 0xec, 0xc6, 0x5b, 0x5a, 0xe4, 0xe9,
- 0x10, 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0xd6, 0xbf, 0xa5, 0xed, 0x1d, 0x7f,
- 0x46, 0xdb, 0x6c, 0x97, 0x32, 0xd2, 0x42, 0x4f, 0x87, 0xab, 0x97, 0x46, 0xc3, 0x55, 0x88, 0xd6,
- 0x70, 0x88, 0x8a, 0xf6, 0x60, 0x52, 0xa0, 0x57, 0x04, 0xfa, 0xcd, 0x42, 0x74, 0xb9, 0x69, 0x0d,
- 0x93, 0xcf, 0xef, 0x3d, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x93, 0xdb, 0x84, 0x11,
- 0x2c, 0x80, 0xd0, 0x0d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xc4, 0x9a, 0xb2, 0x3e, 0xd1, 0xba, 0x2c,
- 0xb9, 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x6a, 0x76, 0xdf, 0x3b, 0xba, 0xc7,
- 0xd0, 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8,
- 0xbe, 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xda, 0xc4, 0xfa, 0xec, 0xc6, 0x9b, 0x5a,
- 0x41, 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0x73, 0x12, 0xb7, 0xfa, 0x80, 0x23, 0x60, 0x1f, 0x48, 0xfd,
- 0x65, 0x05, 0x60, 0x9b, 0x3a, 0x86, 0x3d, 0x30, 0xa9, 0xc5, 0x2e, 0xe0, 0xe8, 0x1e, 0xc0, 0xa4,
- 0xe7, 0xd0, 0xb6, 0x3c, 0xba, 0xeb, 0x85, 0x3b, 0x88, 0x8c, 0x3a, 0x70, 0x68, 0x3b, 0x3a, 0x34,
- 0xfe, 0x85, 0x05, 0x04, 0xfa, 0x18, 0xa6, 0x3c, 0x46, 0x58, 0xcf, 0x13, 0x47, 0x36, 0xbb, 0xf1,
- 0x7a, 0x19, 0x30, 0x21, 0xd0, 0xaa, 0x4b, 0xb8, 0x29, 0xff, 0x1b, 0x4b, 0x20, 0xf5, 0x6f, 0x13,
- 0xb0, 0x10, 0x31, 0x6f, 0xd9, 0x56, 0x47, 0x67, 0x3c, 0xa4, 0xef, 0xc0, 0x24, 0x1b, 0x38, 0x54,
- 0xf8, 0x64, 0xa6, 0x75, 0x3d, 0x30, 0xe6, 0x70, 0xe0, 0xd0, 0xe7, 0xc3, 0xd5, 0xa5, 0x1c, 0x11,
- 0x4e, 0xc2, 0x42, 0x08, 0xed, 0x84, 0x76, 0x56, 0x84, 0xf8, 0x3b, 0x49, 0xe5, 0xcf, 0x87, 0xab,
- 0x39, 0x05, 0x44, 0x0b, 0x91, 0x92, 0x26, 0xa2, 0xcf, 0xa0, 0x6e, 0x10, 0x8f, 0x1d, 0x39, 0x1d,
- 0xc2, 0xe8, 0xa1, 0x6e, 0xd2, 0xc6, 0x94, 0xd8, 0xfd, 0x1b, 0xe5, 0x0e, 0x8a, 0x4b, 0xb4, 0xae,
- 0x4a, 0x0b, 0xea, 0x3b, 0x09, 0x24, 0x9c, 0x42, 0x46, 0x7d, 0x40, 0x7c, 0xe5, 0xd0, 0x25, 0x96,
- 0xe7, 0xef, 0x8a, 0xeb, 0x9b, 0x1e, 0x5b, 0xdf, 0xb2, 0xd4, 0x87, 0x76, 0x32, 0x68, 0x38, 0x47,
- 0x03, 0x7a, 0x0d, 0xa6, 0x5c, 0x4a, 0x3c, 0xdb, 0x6a, 0x4c, 0x0a, 0x8f, 0x85, 0xc7, 0x85, 0xc5,
- 0x2a, 0x96, 0x54, 0xf4, 0x3a, 0x4c, 0x9b, 0xd4, 0xf3, 0x48, 0x97, 0x36, 0xaa, 0x82, 0x71, 0x5e,
- 0x32, 0x4e, 0xef, 0xfa, 0xcb, 0x38, 0xa0, 0xab, 0xbf, 0x57, 0xa0, 0x1e, 0x1d, 0xd3, 0x05, 0xe4,
- 0xea, 0xfd, 0x64, 0xae, 0xbe, 0x5a, 0x22, 0x38, 0x0b, 0x72, 0xf4, 0x1f, 0x15, 0x40, 0x11, 0x13,
- 0xb6, 0x0d, 0xe3, 0x98, 0xb4, 0x4f, 0xd1, 0x1a, 0x4c, 0x5a, 0xc4, 0x0c, 0x62, 0x32, 0x4c, 0x90,
- 0x8f, 0x88, 0x49, 0xb1, 0xa0, 0xa0, 0x2f, 0x14, 0x40, 0x3d, 0x71, 0x9a, 0x9d, 0x4d, 0xcb, 0xb2,
- 0x19, 0xe1, 0x0e, 0x0e, 0x0c, 0xda, 0x2a, 0x61, 0x50, 0xa0, 0x4b, 0x3b, 0xca, 0xa0, 0xdc, 0xb3,
- 0x98, 0x3b, 0x88, 0x0e, 0x36, 0xcb, 0x80, 0x73, 0x54, 0xa3, 0x1f, 0x03, 0xb8, 0x12, 0xf3, 0xd0,
- 0x96, 0x69, 0x5b, 0x5c, 0x03, 0x02, 0xf5, 0x5b, 0xb6, 0xf5, 0x58, 0xef, 0x46, 0x85, 0x05, 0x87,
- 0x10, 0x38, 0x06, 0xb7, 0x7c, 0x0f, 0x96, 0x0a, 0xec, 0x44, 0x97, 0x61, 0xe2, 0x94, 0x0e, 0x7c,
- 0x57, 0x61, 0xfe, 0x13, 0x2d, 0x42, 0xb5, 0x4f, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0xb7,
- 0x2b, 0xef, 0x29, 0xea, 0x6f, 0xaa, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf3, 0xeb, 0xc1, 0x31,
- 0xf4, 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f,
- 0x42, 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9,
- 0x71, 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06,
- 0x61, 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44,
- 0xf5, 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0xee,
- 0x40, 0x64, 0xdb, 0x59, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38,
- 0xb4, 0x09, 0xf3, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0x83, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2,
- 0xb4, 0xda, 0x5a, 0x92, 0x42, 0xf3, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x0c, 0xae,
- 0xdd, 0xfb, 0xba, 0xc7, 0x6c, 0x77, 0xb0, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x8c,
- 0x86, 0xab, 0x8b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51,
- 0xc3, 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0xc6, 0x0b,
- 0xd3, 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x72, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa6, 0xa4,
- 0x63, 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x23, 0x76, 0xf4, 0xca, 0x68, 0xb8, 0xba, 0xb4, 0x9f, 0xcf,
- 0x82, 0x8b, 0x64, 0xd5, 0x3f, 0x4f, 0xc2, 0xe5, 0xf4, 0x1d, 0x87, 0x3e, 0x04, 0x64, 0x1f, 0x7b,
- 0xd4, 0xed, 0xd3, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee,
- 0x65, 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26,
- 0x09, 0x36, 0x61, 0x5e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7,
- 0xf9, 0xd1, 0x1d, 0x98, 0x73, 0x79, 0x1c, 0x84, 0x00, 0xd3, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x0e,
- 0xc7, 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0x2b, 0xa4, 0x4f, 0x74, 0x83, 0x1c, 0x1b, 0x34, 0x04,
- 0x98, 0x14, 0x00, 0x2f, 0x4b, 0x80, 0x2b, 0x9b, 0x69, 0x06, 0x9c, 0x95, 0x41, 0xbb, 0xb0, 0xd0,
- 0xb3, 0xb2, 0x50, 0x7e, 0x10, 0xbf, 0x22, 0xa1, 0x16, 0x8e, 0xb2, 0x2c, 0x38, 0x4f, 0x0e, 0x7d,
- 0x0a, 0xd0, 0x0e, 0x6e, 0x75, 0xaf, 0x31, 0x25, 0xca, 0xf0, 0x8d, 0x12, 0xc9, 0x16, 0xb6, 0x02,
- 0x51, 0x09, 0x0c, 0x97, 0x3c, 0x1c, 0xc3, 0x44, 0xb7, 0xa1, 0xde, 0xb6, 0x0d, 0x43, 0x44, 0xfe,
- 0x96, 0xdd, 0xb3, 0x98, 0x08, 0xde, 0x6a, 0x0b, 0xf1, 0xcb, 0x7e, 0x2b, 0x41, 0xc1, 0x29, 0x4e,
- 0xf5, 0x8f, 0x4a, 0xfc, 0x9a, 0x09, 0xd2, 0x19, 0xdd, 0x4e, 0xb4, 0x3e, 0xaf, 0xa5, 0x5a, 0x9f,
- 0xab, 0x59, 0x89, 0x58, 0xe7, 0xa3, 0xc3, 0x1c, 0x0f, 0x7e, 0xdd, 0xea, 0xfa, 0x07, 0x2e, 0x4b,
- 0xe2, 0x5b, 0x67, 0xa6, 0x52, 0xc8, 0x1d, 0xbb, 0x18, 0xaf, 0x88, 0x33, 0x8f, 0x13, 0x71, 0x12,
- 0x59, 0xbd, 0x0b, 0xf5, 0x64, 0x1e, 0x26, 0x7a, 0x7a, 0xe5, 0xdc, 0x9e, 0xfe, 0x6b, 0x05, 0x96,
- 0x0a, 0xb4, 0x23, 0x03, 0xea, 0x26, 0x79, 0x12, 0x3b, 0xe6, 0x73, 0x7b, 0x63, 0x3e, 0x35, 0x69,
- 0xfe, 0xd4, 0xa4, 0x3d, 0xb0, 0xd8, 0x9e, 0x7b, 0xc0, 0x5c, 0xdd, 0xea, 0xfa, 0xe7, 0xb0, 0x9b,
- 0xc0, 0xc2, 0x29, 0x6c, 0xf4, 0x09, 0xd4, 0x4c, 0xf2, 0xe4, 0xa0, 0xe7, 0x76, 0xf3, 0xfc, 0x55,
- 0x4e, 0x8f, 0xb8, 0x3f, 0x76, 0x25, 0x0a, 0x0e, 0xf1, 0xd4, 0x3f, 0x29, 0xb0, 0x96, 0xd8, 0x25,
- 0xaf, 0x15, 0xf4, 0x71, 0xcf, 0x38, 0xa0, 0xd1, 0x89, 0xbf, 0x09, 0x33, 0x0e, 0x71, 0x99, 0x1e,
- 0xd6, 0x8b, 0x6a, 0x6b, 0x6e, 0x34, 0x5c, 0x9d, 0xd9, 0x0f, 0x16, 0x71, 0x44, 0xcf, 0xf1, 0x4d,
- 0xe5, 0xc5, 0xf9, 0x46, 0xfd, 0x8f, 0x02, 0xd5, 0x83, 0x36, 0x31, 0xe8, 0x05, 0x4c, 0x2a, 0xdb,
- 0x89, 0x49, 0x45, 0x2d, 0x8c, 0x59, 0x61, 0x4f, 0xe1, 0x90, 0xb2, 0x93, 0x1a, 0x52, 0xae, 0x9d,
- 0x83, 0x73, 0xf6, 0x7c, 0xf2, 0x3e, 0xcc, 0x84, 0xea, 0x12, 0x45, 0x59, 0x39, 0xaf, 0x28, 0xab,
- 0xbf, 0xae, 0xc0, 0x6c, 0x4c, 0xc5, 0x78, 0xd2, 0xdc, 0xdd, 0xb1, 0xbe, 0x86, 0x17, 0xae, 0x8d,
- 0x32, 0x1b, 0xd1, 0x82, 0x1e, 0xc6, 0x6f, 0x17, 0xa3, 0x66, 0x21, 0xdb, 0xda, 0xdc, 0x85, 0x3a,
- 0x23, 0x6e, 0x97, 0xb2, 0x80, 0x26, 0x1c, 0x36, 0x13, 0xcd, 0x2a, 0x87, 0x09, 0x2a, 0x4e, 0x71,
- 0x2f, 0xdf, 0x81, 0xb9, 0x84, 0xb2, 0xb1, 0x7a, 0xbe, 0x2f, 0xb8, 0x73, 0xa2, 0x54, 0xb8, 0x80,
- 0xe8, 0xfa, 0x30, 0x11, 0x5d, 0xeb, 0xc5, 0xce, 0x8c, 0x25, 0x68, 0x51, 0x8c, 0xe1, 0x54, 0x8c,
- 0xbd, 0x51, 0x0a, 0xed, 0xec, 0x48, 0xfb, 0x67, 0x05, 0x16, 0x63, 0xdc, 0xd1, 0x28, 0xfc, 0xbd,
- 0xc4, 0x7d, 0xb0, 0x9e, 0xba, 0x0f, 0x1a, 0x79, 0x32, 0x2f, 0x6c, 0x16, 0xce, 0x9f, 0x4f, 0x27,
- 0xfe, 0x1f, 0xe7, 0xd3, 0x3f, 0x28, 0x30, 0x1f, 0xf3, 0xdd, 0x05, 0x0c, 0xa8, 0x0f, 0x92, 0x03,
- 0xea, 0xb5, 0x32, 0x41, 0x53, 0x30, 0xa1, 0xde, 0x86, 0x85, 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d,
- 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8, 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88,
- 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea, 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e,
- 0xda, 0x46, 0xcf, 0xa4, 0x5b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9, 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1,
- 0xb7, 0x07, 0x88, 0xc0, 0xec, 0xe7, 0x27, 0xd4, 0xda, 0xa6, 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1,
- 0x07, 0x12, 0x7e, 0xf6, 0x51, 0x44, 0x7a, 0x3e, 0x5c, 0x5d, 0x2f, 0x83, 0x28, 0x22, 0x34, 0x8e,
- 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60, 0xbd, 0x1b, 0x64, 0xf4, 0xa3, 0x90,
- 0x32, 0x96, 0x82, 0x18, 0xa2, 0xfa, 0xdb, 0x5a, 0xe2, 0xbc, 0xbf, 0xf1, 0x63, 0xe6, 0xcf, 0x61,
- 0xb1, 0x1f, 0x79, 0x27, 0x60, 0xe0, 0x6d, 0xf9, 0x44, 0xfa, 0xe9, 0x2e, 0x84, 0xcf, 0xf3, 0x6b,
- 0xeb, 0xdb, 0x52, 0xc9, 0xe2, 0xc3, 0x1c, 0x38, 0x9c, 0xab, 0x04, 0x7d, 0x17, 0x66, 0xf9, 0x48,
- 0xa3, 0xb7, 0xe9, 0x47, 0xc4, 0x0c, 0x72, 0x71, 0x21, 0x88, 0x97, 0x83, 0x88, 0x84, 0xe3, 0x7c,
- 0xe8, 0x04, 0x16, 0x1c, 0xbb, 0xb3, 0x4b, 0x2c, 0xd2, 0xa5, 0xbc, 0x11, 0xf4, 0x8f, 0x52, 0xcc,
- 0x9e, 0x33, 0xad, 0x77, 0x83, 0xf6, 0x7f, 0x3f, 0xcb, 0xf2, 0x9c, 0x0f, 0x71, 0xd9, 0x65, 0x11,
- 0x04, 0x79, 0x90, 0xc8, 0x85, 0x7a, 0x4f, 0xf6, 0x63, 0x72, 0x14, 0xf7, 0x1f, 0xd9, 0x36, 0xca,
- 0x24, 0xe5, 0x51, 0x42, 0x32, 0xba, 0x30, 0x93, 0xeb, 0x38, 0xa5, 0xa1, 0x70, 0xb4, 0xae, 0xfd,
- 0x4f, 0xa3, 0x75, 0xce, 0xac, 0x3f, 0x33, 0xe6, 0xac, 0xff, 0x17, 0x05, 0xae, 0x39, 0x25, 0x72,
- 0xa9, 0x01, 0xc2, 0x37, 0xf7, 0xcb, 0xf8, 0xa6, 0x4c, 0x6e, 0xb6, 0xd6, 0x47, 0xc3, 0xd5, 0x6b,
- 0x65, 0x38, 0x71, 0x29, 0xfb, 0xd0, 0x43, 0xa8, 0xd9, 0xb2, 0x06, 0x36, 0x66, 0x85, 0xad, 0x37,
- 0xca, 0xd8, 0x1a, 0xd4, 0x4d, 0x3f, 0x2d, 0x83, 0x2f, 0x1c, 0x62, 0xa9, 0xbf, 0xab, 0xc2, 0x95,
- 0xcc, 0x0d, 0x8e, 0x7e, 0x78, 0xc6, 0x9c, 0x7f, 0xf5, 0x85, 0xcd, 0xf8, 0x99, 0x01, 0x7d, 0x62,
- 0x8c, 0x01, 0x7d, 0x13, 0xe6, 0xdb, 0x3d, 0xd7, 0xa5, 0x16, 0x4b, 0x8d, 0xe7, 0x61, 0xb0, 0x6c,
- 0x25, 0xc9, 0x38, 0xcd, 0x9f, 0xf7, 0xc6, 0x50, 0x1d, 0xf3, 0x8d, 0x21, 0x6e, 0x85, 0x9c, 0x13,
- 0xfd, 0xd4, 0xce, 0x5a, 0x21, 0xc7, 0xc5, 0x34, 0x3f, 0x6f, 0x5a, 0x7d, 0xd4, 0x10, 0x61, 0x3a,
- 0xd9, 0xb4, 0x1e, 0x25, 0xa8, 0x38, 0xc5, 0x9d, 0x33, 0xaf, 0xcf, 0x94, 0x9d, 0xd7, 0x11, 0x49,
- 0xbc, 0x26, 0x80, 0xa8, 0xa3, 0x37, 0xcb, 0xc4, 0x59, 0xf9, 0xe7, 0x84, 0xdc, 0x87, 0x94, 0xd9,
- 0xf1, 0x1f, 0x52, 0xd4, 0xbf, 0x2a, 0xf0, 0x72, 0x61, 0xc5, 0x42, 0x9b, 0x89, 0x96, 0xf2, 0x66,
- 0xaa, 0xa5, 0xfc, 0x4e, 0xa1, 0x60, 0xac, 0xaf, 0x74, 0xf3, 0x5f, 0x1a, 0xde, 0x2f, 0xf7, 0xd2,
- 0x90, 0x33, 0x05, 0x9f, 0xff, 0xe4, 0xd0, 0xfa, 0xfe, 0xd3, 0x67, 0x2b, 0x97, 0xbe, 0x7c, 0xb6,
- 0x72, 0xe9, 0xab, 0x67, 0x2b, 0x97, 0x7e, 0x31, 0x5a, 0x51, 0x9e, 0x8e, 0x56, 0x94, 0x2f, 0x47,
- 0x2b, 0xca, 0x57, 0xa3, 0x15, 0xe5, 0xef, 0xa3, 0x15, 0xe5, 0x57, 0x5f, 0xaf, 0x5c, 0xfa, 0x64,
- 0xa9, 0xe0, 0xdf, 0xe8, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xc9, 0xe6, 0x8c, 0xa7, 0x1e,
- 0x00, 0x00,
+ // 2041 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xdd, 0x6f, 0x1b, 0xc7,
+ 0x11, 0xd7, 0x51, 0xa2, 0x44, 0x8d, 0x22, 0xca, 0x5e, 0xa9, 0x16, 0xa3, 0xb4, 0x92, 0x70, 0x31,
+ 0x62, 0x25, 0xb1, 0x8f, 0xb1, 0x92, 0x06, 0x89, 0xdd, 0xba, 0x10, 0x25, 0x37, 0x56, 0x20, 0x45,
+ 0xca, 0x4a, 0xb2, 0xd1, 0xf4, 0x03, 0x59, 0x91, 0x6b, 0xea, 0xa2, 0xfb, 0xc2, 0xdd, 0x52, 0x31,
+ 0xd1, 0x97, 0xfe, 0x01, 0x2d, 0xd2, 0xe7, 0xfe, 0x15, 0xed, 0x53, 0x8b, 0x16, 0x7d, 0x2d, 0xfc,
+ 0x18, 0xf4, 0xa5, 0x79, 0x22, 0x6a, 0xe6, 0xb5, 0x7d, 0x6b, 0x5f, 0x0c, 0x14, 0x28, 0x76, 0x6f,
+ 0xef, 0xfb, 0x4e, 0x3a, 0x16, 0xb0, 0x80, 0xe6, 0x8d, 0xb7, 0x33, 0xf3, 0x9b, 0xd9, 0xd9, 0x99,
+ 0xd9, 0x99, 0x25, 0xdc, 0x38, 0x7d, 0xcf, 0xd3, 0x74, 0xbb, 0x49, 0x1c, 0xbd, 0x49, 0x1c, 0xc7,
+ 0x6b, 0x9e, 0xdd, 0x3e, 0xa6, 0x8c, 0xdc, 0x6e, 0x76, 0xa9, 0x45, 0x5d, 0xc2, 0x68, 0x47, 0x73,
+ 0x5c, 0x9b, 0xd9, 0x68, 0xd1, 0x67, 0xd4, 0x88, 0xa3, 0x6b, 0x9c, 0x51, 0x93, 0x8c, 0x4b, 0xb7,
+ 0xba, 0x3a, 0x3b, 0xe9, 0x1d, 0x6b, 0x6d, 0xdb, 0x6c, 0x76, 0xed, 0xae, 0xdd, 0x14, 0xfc, 0xc7,
+ 0xbd, 0xc7, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x3e, 0xce, 0x92, 0x1a, 0x53, 0xd8, 0xb6, 0x5d, 0xda,
+ 0x3c, 0xcb, 0xe8, 0x5a, 0x7a, 0x27, 0xe2, 0x31, 0x49, 0xfb, 0x44, 0xb7, 0xa8, 0xdb, 0x6f, 0x3a,
+ 0xa7, 0x5d, 0xbe, 0xe0, 0x35, 0x4d, 0xca, 0x48, 0x9e, 0x54, 0xb3, 0x48, 0xca, 0xed, 0x59, 0x4c,
+ 0x37, 0x69, 0x46, 0xe0, 0xdd, 0x8b, 0x04, 0xbc, 0xf6, 0x09, 0x35, 0x49, 0x46, 0xee, 0xed, 0x22,
+ 0xb9, 0x1e, 0xd3, 0x8d, 0xa6, 0x6e, 0x31, 0x8f, 0xb9, 0x69, 0x21, 0xf5, 0xdf, 0x0a, 0xa0, 0x4d,
+ 0xdb, 0x62, 0xae, 0x6d, 0x18, 0xd4, 0xc5, 0xf4, 0x4c, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42, 0x8d,
+ 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0xab, 0xca, 0xda, 0xcc, 0xfa, 0x5b, 0x5a, 0xe4, 0xe9, 0x10,
+ 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0x76, 0x76, 0x5b, 0xdb, 0x3b, 0xfe, 0x8c,
+ 0xb6, 0xd9, 0x2e, 0x65, 0xa4, 0x85, 0x9e, 0x0e, 0x56, 0xc6, 0x86, 0x83, 0x15, 0x88, 0xd6, 0x70,
+ 0x88, 0x8a, 0xf6, 0x60, 0x42, 0xa0, 0x57, 0x04, 0xfa, 0xad, 0x42, 0x74, 0xb9, 0x69, 0x0d, 0x93,
+ 0xcf, 0xef, 0x3f, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x13, 0x5b, 0x84, 0x11, 0x2c,
+ 0x80, 0xd0, 0x4d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xf8, 0xaa, 0xb2, 0x36, 0xde, 0xba, 0x22, 0xb9,
+ 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x5a, 0x76, 0xdf, 0x3b, 0xba, 0xc7, 0xd0,
+ 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8, 0xbe,
+ 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xea, 0xf8, 0xda, 0xcc, 0xfa, 0x9b, 0x5a, 0x41,
+ 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0xb3, 0x12, 0xb7, 0xba, 0xcd, 0x11, 0xb0, 0x0f, 0xa4, 0xfe, 0xb2,
+ 0x02, 0xb0, 0x45, 0x1d, 0xc3, 0xee, 0x9b, 0xd4, 0x62, 0x97, 0x70, 0x74, 0xdb, 0x30, 0xe1, 0x39,
+ 0xb4, 0x2d, 0x8f, 0xee, 0x46, 0xe1, 0x0e, 0x22, 0xa3, 0x0e, 0x1c, 0xda, 0x8e, 0x0e, 0x8d, 0x7f,
+ 0x61, 0x01, 0x81, 0x3e, 0x86, 0x49, 0x8f, 0x11, 0xd6, 0xf3, 0xc4, 0x91, 0xcd, 0xac, 0xbf, 0x5e,
+ 0x06, 0x4c, 0x08, 0xb4, 0xea, 0x12, 0x6e, 0xd2, 0xff, 0xc6, 0x12, 0x48, 0xfd, 0xdb, 0x38, 0xcc,
+ 0x47, 0xcc, 0x9b, 0xb6, 0xd5, 0xd1, 0x19, 0x0f, 0xe9, 0xbb, 0x30, 0xc1, 0xfa, 0x0e, 0x15, 0x3e,
+ 0x99, 0x6e, 0xdd, 0x08, 0x8c, 0x39, 0xec, 0x3b, 0xf4, 0xf9, 0x60, 0x65, 0x31, 0x47, 0x84, 0x93,
+ 0xb0, 0x10, 0x42, 0x3b, 0xa1, 0x9d, 0x15, 0x21, 0xfe, 0x4e, 0x52, 0xf9, 0xf3, 0xc1, 0x4a, 0x4e,
+ 0x01, 0xd1, 0x42, 0xa4, 0xa4, 0x89, 0xe8, 0x33, 0xa8, 0x1b, 0xc4, 0x63, 0x47, 0x4e, 0x87, 0x30,
+ 0x7a, 0xa8, 0x9b, 0xb4, 0x31, 0x29, 0x76, 0xff, 0x46, 0xb9, 0x83, 0xe2, 0x12, 0xad, 0x6b, 0xd2,
+ 0x82, 0xfa, 0x4e, 0x02, 0x09, 0xa7, 0x90, 0xd1, 0x19, 0x20, 0xbe, 0x72, 0xe8, 0x12, 0xcb, 0xf3,
+ 0x77, 0xc5, 0xf5, 0x4d, 0x8d, 0xac, 0x6f, 0x49, 0xea, 0x43, 0x3b, 0x19, 0x34, 0x9c, 0xa3, 0x01,
+ 0xbd, 0x06, 0x93, 0x2e, 0x25, 0x9e, 0x6d, 0x35, 0x26, 0x84, 0xc7, 0xc2, 0xe3, 0xc2, 0x62, 0x15,
+ 0x4b, 0x2a, 0x7a, 0x1d, 0xa6, 0x4c, 0xea, 0x79, 0xa4, 0x4b, 0x1b, 0x55, 0xc1, 0x38, 0x27, 0x19,
+ 0xa7, 0x76, 0xfd, 0x65, 0x1c, 0xd0, 0xd5, 0x3f, 0x28, 0x50, 0x8f, 0x8e, 0xe9, 0x12, 0x72, 0xf5,
+ 0x41, 0x32, 0x57, 0x5f, 0x2d, 0x11, 0x9c, 0x05, 0x39, 0xfa, 0x8f, 0x0a, 0xa0, 0x88, 0x09, 0xdb,
+ 0x86, 0x71, 0x4c, 0xda, 0xa7, 0x68, 0x15, 0x26, 0x2c, 0x62, 0x06, 0x31, 0x19, 0x26, 0xc8, 0x47,
+ 0xc4, 0xa4, 0x58, 0x50, 0xd0, 0x17, 0x0a, 0xa0, 0x9e, 0x38, 0xcd, 0xce, 0x86, 0x65, 0xd9, 0x8c,
+ 0x70, 0x07, 0x07, 0x06, 0x6d, 0x96, 0x30, 0x28, 0xd0, 0xa5, 0x1d, 0x65, 0x50, 0xee, 0x5b, 0xcc,
+ 0xed, 0x47, 0x07, 0x9b, 0x65, 0xc0, 0x39, 0xaa, 0xd1, 0x8f, 0x01, 0x5c, 0x89, 0x79, 0x68, 0xcb,
+ 0xb4, 0x2d, 0xae, 0x01, 0x81, 0xfa, 0x4d, 0xdb, 0x7a, 0xac, 0x77, 0xa3, 0xc2, 0x82, 0x43, 0x08,
+ 0x1c, 0x83, 0x5b, 0xba, 0x0f, 0x8b, 0x05, 0x76, 0xa2, 0x2b, 0x30, 0x7e, 0x4a, 0xfb, 0xbe, 0xab,
+ 0x30, 0xff, 0x89, 0x16, 0xa0, 0x7a, 0x46, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0x77, 0x2a,
+ 0xef, 0x29, 0xea, 0x6f, 0xab, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf1, 0xeb, 0xc1, 0x31, 0xf4,
+ 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f, 0x42,
+ 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9, 0x71,
+ 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06, 0x61,
+ 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44, 0xf5,
+ 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0x6e, 0x5f,
+ 0x64, 0xdb, 0x79, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38, 0xb4,
+ 0x01, 0x73, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0xfd, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2, 0xb4,
+ 0xda, 0x5a, 0x94, 0x42, 0x73, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x08, 0xae, 0xdd,
+ 0x07, 0xba, 0xc7, 0x6c, 0xb7, 0xbf, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x0c, 0x07,
+ 0x2b, 0x0b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51, 0xc3,
+ 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0x46, 0x0b, 0xd3,
+ 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x74, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa2, 0xa4, 0x63,
+ 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x2d, 0x76, 0xf4, 0xca, 0x70, 0xb0, 0xb2, 0xb8, 0x9f, 0xcf, 0x82,
+ 0x8b, 0x64, 0xd5, 0x5f, 0x55, 0xe1, 0x4a, 0xfa, 0x8e, 0x43, 0x1f, 0x02, 0xb2, 0x8f, 0x3d, 0xea,
+ 0x9e, 0xd1, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee, 0x65,
+ 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26, 0x09,
+ 0x36, 0x60, 0x4e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7, 0xf9,
+ 0xd1, 0x5d, 0x98, 0x75, 0x79, 0x1c, 0x84, 0x00, 0x53, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x16, 0xc7,
+ 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0xab, 0xe4, 0x8c, 0xe8, 0x06, 0x39, 0x36, 0x68, 0x08, 0x30,
+ 0x21, 0x00, 0x5e, 0x96, 0x00, 0x57, 0x37, 0xd2, 0x0c, 0x38, 0x2b, 0x83, 0x76, 0x61, 0xbe, 0x67,
+ 0x65, 0xa1, 0xfc, 0x20, 0x7e, 0x45, 0x42, 0xcd, 0x1f, 0x65, 0x59, 0x70, 0x9e, 0x1c, 0xda, 0x86,
+ 0x79, 0x46, 0x5d, 0x53, 0xb7, 0x08, 0xd3, 0xad, 0x6e, 0x08, 0xe7, 0x9f, 0xfc, 0x22, 0x87, 0x3a,
+ 0xcc, 0x92, 0x71, 0x9e, 0x0c, 0xfa, 0x14, 0xa0, 0x1d, 0x34, 0x08, 0x5e, 0x63, 0x52, 0x54, 0xf4,
+ 0x9b, 0x25, 0xf2, 0x36, 0xec, 0x2a, 0xa2, 0x6a, 0x1a, 0x2e, 0x79, 0x38, 0x86, 0x89, 0xee, 0x40,
+ 0xbd, 0x6d, 0x1b, 0x86, 0x48, 0xa2, 0x4d, 0xbb, 0x67, 0x31, 0x91, 0x07, 0xd5, 0x16, 0xe2, 0x7d,
+ 0xc3, 0x66, 0x82, 0x82, 0x53, 0x9c, 0xea, 0x9f, 0x94, 0xf8, 0x8d, 0x15, 0x54, 0x06, 0x74, 0x27,
+ 0xd1, 0x45, 0xbd, 0x96, 0xea, 0xa2, 0xae, 0x65, 0x25, 0x62, 0x4d, 0x94, 0x0e, 0xb3, 0x3c, 0x8f,
+ 0x74, 0xab, 0xeb, 0xc7, 0x8e, 0xac, 0xae, 0x6f, 0x9d, 0x9b, 0x95, 0x21, 0x77, 0xec, 0x8e, 0xbd,
+ 0x2a, 0xc2, 0x27, 0x4e, 0xc4, 0x49, 0x64, 0xf5, 0x1e, 0xd4, 0x93, 0x29, 0x9d, 0x18, 0x0f, 0x94,
+ 0x0b, 0xc7, 0x83, 0xaf, 0x15, 0x58, 0x2c, 0xd0, 0x8e, 0x0c, 0xa8, 0x9b, 0xe4, 0x49, 0x2c, 0x62,
+ 0x2e, 0x6c, 0xb3, 0xf9, 0x00, 0xa6, 0xf9, 0x03, 0x98, 0xb6, 0x6d, 0xb1, 0x3d, 0xf7, 0x80, 0xb9,
+ 0xba, 0xd5, 0xf5, 0xcf, 0x61, 0x37, 0x81, 0x85, 0x53, 0xd8, 0xe8, 0x13, 0xa8, 0x99, 0xe4, 0xc9,
+ 0x41, 0xcf, 0xed, 0xe6, 0xf9, 0xab, 0x9c, 0x1e, 0x71, 0x15, 0xed, 0x4a, 0x14, 0x1c, 0xe2, 0xa9,
+ 0x7f, 0x56, 0x60, 0x35, 0xb1, 0x4b, 0x5e, 0x76, 0xe8, 0xe3, 0x9e, 0x71, 0x40, 0xa3, 0x13, 0x7f,
+ 0x13, 0xa6, 0x1d, 0xe2, 0x32, 0x3d, 0x2c, 0x3d, 0xd5, 0xd6, 0xec, 0x70, 0xb0, 0x32, 0xbd, 0x1f,
+ 0x2c, 0xe2, 0x88, 0x9e, 0xe3, 0x9b, 0xca, 0x8b, 0xf3, 0x8d, 0xfa, 0x1f, 0x05, 0xaa, 0x07, 0x6d,
+ 0x62, 0xd0, 0x4b, 0x18, 0x7a, 0xb6, 0x12, 0x43, 0x8f, 0x5a, 0x18, 0xb3, 0xc2, 0x9e, 0xc2, 0x79,
+ 0x67, 0x27, 0x35, 0xef, 0x5c, 0xbf, 0x00, 0xe7, 0xfc, 0x51, 0xe7, 0x7d, 0x98, 0x0e, 0xd5, 0x25,
+ 0xea, 0xbb, 0x72, 0x51, 0x7d, 0x57, 0x7f, 0x53, 0x81, 0x99, 0x98, 0x8a, 0xd1, 0xa4, 0xb9, 0xbb,
+ 0x63, 0x2d, 0x12, 0x2f, 0x5c, 0xeb, 0x65, 0x36, 0xa2, 0x05, 0xed, 0x90, 0xdf, 0x79, 0x46, 0x7d,
+ 0x47, 0xb6, 0x4b, 0xba, 0x07, 0x75, 0x46, 0xdc, 0x2e, 0x65, 0x01, 0x4d, 0x38, 0x6c, 0x3a, 0x1a,
+ 0x7b, 0x0e, 0x13, 0x54, 0x9c, 0xe2, 0x5e, 0xba, 0x0b, 0xb3, 0x09, 0x65, 0x23, 0xb5, 0x8f, 0x5f,
+ 0x70, 0xe7, 0x44, 0xa9, 0x70, 0x09, 0xd1, 0xf5, 0x61, 0x22, 0xba, 0xd6, 0x8a, 0x9d, 0x19, 0x4b,
+ 0xd0, 0xa2, 0x18, 0xc3, 0xa9, 0x18, 0x7b, 0xa3, 0x14, 0xda, 0xf9, 0x91, 0xf6, 0xcf, 0x0a, 0x2c,
+ 0xc4, 0xb8, 0xa3, 0xa9, 0xfa, 0x7b, 0x89, 0xfb, 0x60, 0x2d, 0x75, 0x1f, 0x34, 0xf2, 0x64, 0x5e,
+ 0xd8, 0x58, 0x9d, 0x3f, 0xea, 0x8e, 0xff, 0x3f, 0x8e, 0xba, 0x7f, 0x54, 0x60, 0x2e, 0xe6, 0xbb,
+ 0x4b, 0x98, 0x75, 0xb7, 0x93, 0xb3, 0xee, 0xf5, 0x32, 0x41, 0x53, 0x30, 0xec, 0xde, 0x81, 0xf9,
+ 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d, 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8,
+ 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88, 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea,
+ 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e, 0xda, 0x46, 0xcf, 0xa4, 0x9b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9,
+ 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1, 0xb7, 0xfb, 0x88, 0xc0, 0xcc, 0xe7, 0x27, 0xd4, 0xda, 0xa2,
+ 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1, 0x07, 0x12, 0x7e, 0xe6, 0x51, 0x44, 0x7a, 0x3e, 0x58, 0x59,
+ 0x2b, 0x83, 0x28, 0x22, 0x34, 0x8e, 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60,
+ 0xbd, 0x17, 0x64, 0xf4, 0xa3, 0x90, 0x32, 0x92, 0x82, 0x18, 0xa2, 0xfa, 0xbb, 0x5a, 0xe2, 0xbc,
+ 0xbf, 0xf1, 0x13, 0xeb, 0xcf, 0x61, 0xe1, 0x2c, 0xf2, 0x4e, 0xc0, 0xc0, 0x3b, 0xfc, 0xf1, 0xf4,
+ 0x2b, 0x60, 0x08, 0x9f, 0xe7, 0xd7, 0xd6, 0xb7, 0xa5, 0x92, 0x85, 0x87, 0x39, 0x70, 0x38, 0x57,
+ 0x09, 0xfa, 0x2e, 0xcc, 0xf0, 0xe9, 0x48, 0x6f, 0xd3, 0x8f, 0x88, 0x19, 0xe4, 0xe2, 0x7c, 0x10,
+ 0x2f, 0x07, 0x11, 0x09, 0xc7, 0xf9, 0xd0, 0x09, 0xcc, 0x3b, 0x76, 0x67, 0x97, 0x58, 0xa4, 0x4b,
+ 0x79, 0x23, 0xe8, 0x1f, 0xa5, 0x18, 0x63, 0xa7, 0x5b, 0xef, 0x06, 0x93, 0xc4, 0x7e, 0x96, 0xe5,
+ 0x39, 0x9f, 0x07, 0xb3, 0xcb, 0x22, 0x08, 0xf2, 0x20, 0x91, 0x0b, 0xf5, 0x9e, 0xec, 0xc7, 0xe4,
+ 0x54, 0xef, 0xbf, 0xd7, 0xad, 0x97, 0x49, 0xca, 0xa3, 0x84, 0x64, 0x74, 0x61, 0x26, 0xd7, 0x71,
+ 0x4a, 0x43, 0xe1, 0x94, 0x5e, 0xfb, 0x9f, 0xa6, 0xf4, 0x9c, 0x67, 0x83, 0xe9, 0x11, 0x9f, 0x0d,
+ 0xfe, 0xa2, 0xc0, 0x75, 0xa7, 0x44, 0x2e, 0x35, 0x40, 0xf8, 0xe6, 0x41, 0x19, 0xdf, 0x94, 0xc9,
+ 0xcd, 0xd6, 0xda, 0x70, 0xb0, 0x72, 0xbd, 0x0c, 0x27, 0x2e, 0x65, 0x1f, 0x7a, 0x08, 0x35, 0x5b,
+ 0xd6, 0xc0, 0xc6, 0x8c, 0xb0, 0xf5, 0x66, 0x19, 0x5b, 0x83, 0xba, 0xe9, 0xa7, 0x65, 0xf0, 0x85,
+ 0x43, 0x2c, 0xf5, 0xf7, 0x55, 0xb8, 0x9a, 0xb9, 0xc1, 0xd1, 0x0f, 0xcf, 0x79, 0x32, 0xb8, 0xf6,
+ 0xc2, 0x9e, 0x0b, 0x32, 0xb3, 0xfe, 0xf8, 0x08, 0xb3, 0xfe, 0x06, 0xcc, 0xb5, 0x7b, 0xae, 0x4b,
+ 0x2d, 0x96, 0x9a, 0xf4, 0xc3, 0x60, 0xd9, 0x4c, 0x92, 0x71, 0x9a, 0x3f, 0xef, 0xb9, 0xa2, 0x3a,
+ 0xe2, 0x73, 0x45, 0xdc, 0x0a, 0x39, 0x27, 0xfa, 0xa9, 0x9d, 0xb5, 0x42, 0x8e, 0x8b, 0x69, 0x7e,
+ 0xde, 0xb4, 0xfa, 0xa8, 0x21, 0xc2, 0x54, 0xb2, 0x69, 0x3d, 0x4a, 0x50, 0x71, 0x8a, 0x3b, 0x67,
+ 0x5e, 0x9f, 0x2e, 0x3b, 0xaf, 0x23, 0x92, 0x78, 0x4d, 0x00, 0x51, 0x47, 0x6f, 0x95, 0x89, 0xb3,
+ 0xf2, 0xcf, 0x09, 0xb9, 0x6f, 0x32, 0x33, 0xa3, 0xbf, 0xc9, 0xa8, 0x7f, 0x55, 0xe0, 0xe5, 0xc2,
+ 0x8a, 0x85, 0x36, 0x12, 0x2d, 0xe5, 0xad, 0x54, 0x4b, 0xf9, 0x9d, 0x42, 0xc1, 0x58, 0x5f, 0xe9,
+ 0xe6, 0xbf, 0x34, 0xbc, 0x5f, 0xee, 0xa5, 0x21, 0x67, 0x0a, 0xbe, 0xf8, 0xc9, 0xa1, 0xf5, 0xfd,
+ 0xa7, 0xcf, 0x96, 0xc7, 0xbe, 0x7c, 0xb6, 0x3c, 0xf6, 0xd5, 0xb3, 0xe5, 0xb1, 0x5f, 0x0c, 0x97,
+ 0x95, 0xa7, 0xc3, 0x65, 0xe5, 0xcb, 0xe1, 0xb2, 0xf2, 0xd5, 0x70, 0x59, 0xf9, 0xfb, 0x70, 0x59,
+ 0xf9, 0xf5, 0xd7, 0xcb, 0x63, 0x9f, 0x2c, 0x16, 0xfc, 0xb1, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0x40, 0xa4, 0x4b, 0xb9, 0xf2, 0x1e, 0x00, 0x00,
}
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -1289,6 +1290,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.TerminatingReplicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+ i--
+ dAtA[i] = 0x48
+ }
if m.CollisionCount != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
i--
@@ -2225,6 +2231,9 @@ func (m *DeploymentStatus) Size() (n int) {
if m.CollisionCount != nil {
n += 1 + sovGenerated(uint64(*m.CollisionCount))
}
+ if m.TerminatingReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+ }
return n
}
@@ -2627,6 +2636,7 @@ func (this *DeploymentStatus) String() string {
`Conditions:` + repeatedStringForConditions + `,`,
`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
`}`,
}, "")
return s
@@ -4337,6 +4347,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
}
}
m.CollisionCount = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminatingReplicas = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
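
The v1beta1 marshal hunk writes the literal key byte `0x48`, which is just `(9 << 3) | 0`: field number 9, varint wire type 0. `MarshalToSizedBuffer` fills a pre-sized buffer from the end (note the `i--`), so although field 9 is emitted first in code order, it lands after lower-numbered fields in the final byte stream. A forward-writing sketch of the key and value encoding — the helper names here are assumed, not the vendored ones:

```go
package main

import "fmt"

// fieldKey computes a protobuf field key: the tag number shifted left
// by three, OR'd with the wire type. For terminatingReplicas (field 9,
// varint wire type 0) this yields 0x48, the literal in the hunk above.
func fieldKey(fieldNum, wireType uint64) byte {
	return byte(fieldNum<<3 | wireType)
}

// appendVarint encodes x low 7 bits first, setting the continuation
// bit on every byte except the last. It writes forward for clarity,
// unlike encodeVarintGenerated, which fills a sized buffer backwards.
func appendVarint(dst []byte, x uint64) []byte {
	for x >= 0x80 {
		dst = append(dst, byte(x)|0x80)
		x >>= 7
	}
	return append(dst, byte(x))
}

func main() {
	fmt.Printf("%#x\n", fieldKey(9, 0)) // 0x48

	// Field 9 with value 5 encodes as two bytes: 48 05.
	fmt.Printf("% x\n", appendVarint([]byte{fieldKey(9, 0)}, 5))
}
```
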
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
index 46d7bfdf9..0601efc3c 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
@@ -179,33 +179,40 @@ message DeploymentSpec {
// DeploymentStatus is the most recently observed status of the Deployment.
message DeploymentStatus {
- // observedGeneration is the generation observed by the deployment controller.
+ // The generation observed by the deployment controller.
// +optional
optional int64 observedGeneration = 1;
- // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
optional int32 replicas = 2;
- // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
optional int32 updatedReplicas = 3;
- // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
optional int32 readyReplicas = 7;
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
optional int32 availableReplicas = 4;
- // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
// pods that are still required for the deployment to have 100% available capacity. They may
// either be pods that are running but not yet available or pods that still have not been created.
// +optional
optional int32 unavailableReplicas = 5;
- // Conditions represent the latest available observations of a deployment's current state.
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ optional int32 terminatingReplicas = 9;
+
+ // Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
@@ -455,6 +462,7 @@ message StatefulSetSpec {
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
+ // +optional
optional string serviceName = 5;
// podManagementPolicy controls how pods are created during initial scale up,
diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
index bc4851957..5530c990d 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types.go
@@ -259,6 +259,7 @@ type StatefulSetSpec struct {
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
+ // +optional
ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
// podManagementPolicy controls how pods are created during initial scale up,
@@ -548,33 +549,40 @@ type RollingUpdateDeployment struct {
// DeploymentStatus is the most recently observed status of the Deployment.
type DeploymentStatus struct {
- // observedGeneration is the generation observed by the deployment controller.
+ // The generation observed by the deployment controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
- // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
- // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
- // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
- // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
// pods that are still required for the deployment to have 100% available capacity. They may
// either be pods that are running but not yet available or pods that still have not been created.
// +optional
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
- // Conditions represent the latest available observations of a deployment's current state.
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
+
+ // Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
index 1381d75dc..02ea5f7f2 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
@@ -113,13 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
var map_DeploymentStatus = map[string]string{
"": "DeploymentStatus is the most recently observed status of the Deployment.",
- "observedGeneration": "observedGeneration is the generation observed by the deployment controller.",
- "replicas": "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).",
- "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.",
- "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.",
- "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
- "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
- "conditions": "Conditions represent the latest available observations of a deployment's current state.",
+ "observedGeneration": "The generation observed by the deployment controller.",
+ "replicas": "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
+ "readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
+ "availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
index dd73f1a5a..e8594766c 100644
--- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
@@ -246,6 +246,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
+ if in.TerminatingReplicas != nil {
+ in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+ *out = new(int32)
+ **out = **in
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go
index ac91fddfd..7d28fe42d 100644
--- a/vendor/k8s.io/api/apps/v1beta2/doc.go
+++ b/vendor/k8s.io/api/apps/v1beta2/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1beta2 // import "k8s.io/api/apps/v1beta2"
+package v1beta2
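
This and the later doc.go hunks make the same mechanical change throughout the vendored tree: the legacy `// import "…"` canonical-import-path comments are dropped, since the module-mode Go toolchain no longer enforces them.
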
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
index 1c3d3be5b..9fcba6feb 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
@@ -1017,153 +1017,155 @@ func init() {
}
var fileDescriptor_c423c016abf485d4 = []byte{
- // 2328 bytes of a gzipped FileDescriptorProto
+ // 2359 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
- 0x15, 0xf7, 0xf2, 0x43, 0x26, 0x87, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x5b, 0xd2, 0x58, 0x1b,
- 0xb6, 0x12, 0xdb, 0xa4, 0xad, 0x7c, 0x20, 0xb1, 0xdb, 0x04, 0xa2, 0x94, 0xda, 0x0e, 0xf4, 0xc1,
- 0x0c, 0x2d, 0x07, 0x0d, 0xfa, 0xe1, 0x11, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x13,
- 0xbd, 0xf4, 0x5a, 0xa0, 0x40, 0xdb, 0x6b, 0xff, 0x89, 0xa2, 0x97, 0xa2, 0x68, 0xd0, 0x4b, 0x11,
- 0x04, 0x3e, 0x06, 0xbd, 0x24, 0x27, 0xa2, 0x66, 0x4e, 0x45, 0xd1, 0x5b, 0x7b, 0x31, 0x50, 0xa0,
- 0x98, 0xd9, 0xd9, 0xef, 0x5d, 0x73, 0xa9, 0xd8, 0x4a, 0x13, 0xe4, 0xc6, 0x9d, 0xf7, 0xde, 0x6f,
- 0xde, 0xcc, 0xbc, 0x37, 0xef, 0x37, 0x33, 0x04, 0x17, 0x1f, 0xbc, 0x6e, 0x37, 0x14, 0xa3, 0x89,
- 0x4d, 0xa5, 0x89, 0x4d, 0xd3, 0x6e, 0x1e, 0x5c, 0xdb, 0x23, 0x14, 0xaf, 0x36, 0xfb, 0x44, 0x27,
- 0x16, 0xa6, 0xa4, 0xd7, 0x30, 0x2d, 0x83, 0x1a, 0x70, 0xd9, 0x51, 0x6c, 0x60, 0x53, 0x69, 0x30,
- 0xc5, 0x86, 0x50, 0x3c, 0x7d, 0xa5, 0xaf, 0xd0, 0xfd, 0xc1, 0x5e, 0xa3, 0x6b, 0x68, 0xcd, 0xbe,
- 0xd1, 0x37, 0x9a, 0x5c, 0x7f, 0x6f, 0x70, 0x9f, 0x7f, 0xf1, 0x0f, 0xfe, 0xcb, 0xc1, 0x39, 0x2d,
- 0x07, 0x3a, 0xec, 0x1a, 0x16, 0x69, 0x1e, 0x5c, 0x8b, 0xf6, 0x75, 0xfa, 0x15, 0x5f, 0x47, 0xc3,
- 0xdd, 0x7d, 0x45, 0x27, 0xd6, 0xb0, 0x69, 0x3e, 0xe8, 0xb3, 0x06, 0xbb, 0xa9, 0x11, 0x8a, 0x93,
- 0xac, 0x9a, 0x69, 0x56, 0xd6, 0x40, 0xa7, 0x8a, 0x46, 0x62, 0x06, 0xaf, 0x4d, 0x32, 0xb0, 0xbb,
- 0xfb, 0x44, 0xc3, 0x31, 0xbb, 0x97, 0xd3, 0xec, 0x06, 0x54, 0x51, 0x9b, 0x8a, 0x4e, 0x6d, 0x6a,
- 0x45, 0x8d, 0xe4, 0xff, 0x48, 0x00, 0xae, 0x1b, 0x3a, 0xb5, 0x0c, 0x55, 0x25, 0x16, 0x22, 0x07,
- 0x8a, 0xad, 0x18, 0x3a, 0xbc, 0x07, 0x4a, 0x6c, 0x3c, 0x3d, 0x4c, 0x71, 0x55, 0x3a, 0x2b, 0xad,
- 0x54, 0x56, 0xaf, 0x36, 0xfc, 0x99, 0xf6, 0xe0, 0x1b, 0xe6, 0x83, 0x3e, 0x6b, 0xb0, 0x1b, 0x4c,
- 0xbb, 0x71, 0x70, 0xad, 0xb1, 0xb3, 0xf7, 0x01, 0xe9, 0xd2, 0x2d, 0x42, 0x71, 0x0b, 0x3e, 0x1a,
- 0xd5, 0x8f, 0x8d, 0x47, 0x75, 0xe0, 0xb7, 0x21, 0x0f, 0x15, 0xee, 0x80, 0x02, 0x47, 0xcf, 0x71,
- 0xf4, 0x2b, 0xa9, 0xe8, 0x62, 0xd0, 0x0d, 0x84, 0x3f, 0x7c, 0xfb, 0x21, 0x25, 0x3a, 0x73, 0xaf,
- 0x75, 0x42, 0x40, 0x17, 0x36, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0xcb, 0xa0, 0x64, 0x09, 0xf7, 0xab,
- 0xf9, 0xb3, 0xd2, 0x4a, 0xbe, 0x75, 0x52, 0x68, 0x95, 0xdc, 0x61, 0x21, 0x4f, 0x43, 0x7e, 0x24,
- 0x81, 0xa5, 0xf8, 0xb8, 0x37, 0x15, 0x9b, 0xc2, 0x1f, 0xc7, 0xc6, 0xde, 0xc8, 0x36, 0x76, 0x66,
- 0xcd, 0x47, 0xee, 0x75, 0xec, 0xb6, 0x04, 0xc6, 0xdd, 0x06, 0x45, 0x85, 0x12, 0xcd, 0xae, 0xe6,
- 0xce, 0xe6, 0x57, 0x2a, 0xab, 0x97, 0x1a, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x15, 0xb8,
- 0xc5, 0xdb, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xbc, 0x81, 0x89, 0x66, 0xe8, 0x1d,
- 0x42, 0x8f, 0x60, 0xe5, 0x6e, 0x81, 0x82, 0x6d, 0x92, 0xae, 0x58, 0xb9, 0x0b, 0xa9, 0x03, 0xf0,
- 0x7c, 0xea, 0x98, 0xa4, 0xeb, 0x2f, 0x19, 0xfb, 0x42, 0x1c, 0x01, 0xb6, 0xc1, 0x8c, 0x4d, 0x31,
- 0x1d, 0xd8, 0x7c, 0xc1, 0x2a, 0xab, 0x2b, 0x19, 0xb0, 0xb8, 0x7e, 0x6b, 0x4e, 0xa0, 0xcd, 0x38,
- 0xdf, 0x48, 0xe0, 0xc8, 0xff, 0xc8, 0x01, 0xe8, 0xe9, 0xae, 0x1b, 0x7a, 0x4f, 0xa1, 0x2c, 0x9c,
- 0xaf, 0x83, 0x02, 0x1d, 0x9a, 0x84, 0x4f, 0x48, 0xb9, 0x75, 0xc1, 0x75, 0xe5, 0xce, 0xd0, 0x24,
- 0x4f, 0x46, 0xf5, 0xa5, 0xb8, 0x05, 0x93, 0x20, 0x6e, 0x03, 0x37, 0x3d, 0x27, 0x73, 0xdc, 0xfa,
- 0x95, 0x70, 0xd7, 0x4f, 0x46, 0xf5, 0x84, 0xbd, 0xa3, 0xe1, 0x21, 0x85, 0x1d, 0x84, 0x07, 0x00,
- 0xaa, 0xd8, 0xa6, 0x77, 0x2c, 0xac, 0xdb, 0x4e, 0x4f, 0x8a, 0x46, 0xc4, 0xf0, 0x5f, 0xca, 0xb6,
- 0x50, 0xcc, 0xa2, 0x75, 0x5a, 0x78, 0x01, 0x37, 0x63, 0x68, 0x28, 0xa1, 0x07, 0x78, 0x01, 0xcc,
- 0x58, 0x04, 0xdb, 0x86, 0x5e, 0x2d, 0xf0, 0x51, 0x78, 0x13, 0x88, 0x78, 0x2b, 0x12, 0x52, 0xf8,
- 0x22, 0x38, 0xae, 0x11, 0xdb, 0xc6, 0x7d, 0x52, 0x2d, 0x72, 0xc5, 0x79, 0xa1, 0x78, 0x7c, 0xcb,
- 0x69, 0x46, 0xae, 0x5c, 0xfe, 0xa3, 0x04, 0x66, 0xbd, 0x99, 0x3b, 0x82, 0xcc, 0xb9, 0x19, 0xce,
- 0x1c, 0x79, 0x72, 0xb0, 0xa4, 0x24, 0xcc, 0xc7, 0xf9, 0x80, 0xe3, 0x2c, 0x1c, 0xe1, 0x4f, 0x40,
- 0xc9, 0x26, 0x2a, 0xe9, 0x52, 0xc3, 0x12, 0x8e, 0xbf, 0x9c, 0xd1, 0x71, 0xbc, 0x47, 0xd4, 0x8e,
- 0x30, 0x6d, 0x9d, 0x60, 0x9e, 0xbb, 0x5f, 0xc8, 0x83, 0x84, 0xef, 0x82, 0x12, 0x25, 0x9a, 0xa9,
- 0x62, 0x4a, 0x44, 0xd6, 0x9c, 0x0b, 0x3a, 0xcf, 0x62, 0x86, 0x81, 0xb5, 0x8d, 0xde, 0x1d, 0xa1,
- 0xc6, 0x53, 0xc6, 0x9b, 0x0c, 0xb7, 0x15, 0x79, 0x30, 0xd0, 0x04, 0x73, 0x03, 0xb3, 0xc7, 0x34,
- 0x29, 0xdb, 0xce, 0xfb, 0x43, 0x11, 0x43, 0x57, 0x27, 0xcf, 0xca, 0x6e, 0xc8, 0xae, 0xb5, 0x24,
- 0x7a, 0x99, 0x0b, 0xb7, 0xa3, 0x08, 0x3e, 0x5c, 0x03, 0xf3, 0x9a, 0xa2, 0x23, 0x82, 0x7b, 0xc3,
- 0x0e, 0xe9, 0x1a, 0x7a, 0xcf, 0xe6, 0xa1, 0x54, 0x6c, 0x2d, 0x0b, 0x80, 0xf9, 0xad, 0xb0, 0x18,
- 0x45, 0xf5, 0xe1, 0x26, 0x58, 0x74, 0x37, 0xe0, 0x5b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa9, 0x68,
- 0x0a, 0xad, 0xce, 0x70, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x44, 0x09, 0x72, 0x94, 0x68, 0x25, 0xff,
- 0x76, 0x06, 0xcc, 0x47, 0xf6, 0x05, 0x78, 0x17, 0x2c, 0x75, 0x07, 0x96, 0x45, 0x74, 0xba, 0x3d,
- 0xd0, 0xf6, 0x88, 0xd5, 0xe9, 0xee, 0x93, 0xde, 0x40, 0x25, 0x3d, 0xbe, 0xac, 0xc5, 0x56, 0x4d,
- 0xf8, 0xba, 0xb4, 0x9e, 0xa8, 0x85, 0x52, 0xac, 0xe1, 0x3b, 0x00, 0xea, 0xbc, 0x69, 0x4b, 0xb1,
- 0x6d, 0x0f, 0x33, 0xc7, 0x31, 0xbd, 0x54, 0xdc, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xe6, 0x63, 0x8f,
- 0xd8, 0x8a, 0x45, 0x7a, 0x51, 0x1f, 0xf3, 0x61, 0x1f, 0x37, 0x12, 0xb5, 0x50, 0x8a, 0x35, 0x7c,
- 0x15, 0x54, 0x9c, 0xde, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x10, 0x60, 0x95, 0x6d, 0x5f, 0x84, 0x82,
- 0x7a, 0x6c, 0x68, 0xc6, 0x9e, 0x4d, 0xac, 0x03, 0xd2, 0xbb, 0xe9, 0x90, 0x03, 0x56, 0x41, 0x8b,
- 0xbc, 0x82, 0x7a, 0x43, 0xdb, 0x89, 0x69, 0xa0, 0x04, 0x2b, 0x36, 0x34, 0x27, 0x6a, 0x62, 0x43,
- 0x9b, 0x09, 0x0f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3b, 0xc0,
- 0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x3d, 0x1e, 0x8e, 0xbd, 0xed, 0xb0, 0x18, 0x45, 0xf5, 0xe1, 0x4d,
- 0x70, 0xca, 0x69, 0xda, 0xd5, 0xb1, 0x07, 0x52, 0xe2, 0x20, 0x2f, 0x08, 0x90, 0x53, 0xdb, 0x51,
- 0x05, 0x14, 0xb7, 0x81, 0xd7, 0xc1, 0x5c, 0xd7, 0x50, 0x55, 0x1e, 0x8f, 0xeb, 0xc6, 0x40, 0xa7,
- 0xd5, 0x32, 0x47, 0x81, 0x2c, 0x87, 0xd6, 0x43, 0x12, 0x14, 0xd1, 0x84, 0x3f, 0x03, 0xa0, 0xeb,
- 0x16, 0x06, 0xbb, 0x0a, 0x26, 0x30, 0x80, 0x78, 0x59, 0xf2, 0x2b, 0xb3, 0xd7, 0x64, 0xa3, 0x00,
- 0xa4, 0xfc, 0xb1, 0x04, 0x96, 0x53, 0x12, 0x1d, 0xbe, 0x15, 0x2a, 0x82, 0x97, 0x22, 0x45, 0xf0,
- 0x4c, 0x8a, 0x59, 0xa0, 0x12, 0xee, 0x83, 0x59, 0x46, 0x48, 0x14, 0xbd, 0xef, 0xa8, 0x88, 0xbd,
- 0xac, 0x99, 0x3a, 0x00, 0x14, 0xd4, 0xf6, 0x77, 0xe5, 0x53, 0xe3, 0x51, 0x7d, 0x36, 0x24, 0x43,
- 0x61, 0x60, 0xf9, 0x57, 0x39, 0x00, 0x36, 0x88, 0xa9, 0x1a, 0x43, 0x8d, 0xe8, 0x47, 0xc1, 0x69,
- 0x6e, 0x87, 0x38, 0xcd, 0xc5, 0xf4, 0x25, 0xf1, 0x9c, 0x4a, 0x25, 0x35, 0xef, 0x46, 0x48, 0xcd,
- 0x8b, 0x59, 0xc0, 0x9e, 0xce, 0x6a, 0x3e, 0xcb, 0x83, 0x05, 0x5f, 0xd9, 0xa7, 0x35, 0x37, 0x42,
- 0x2b, 0x7a, 0x31, 0xb2, 0xa2, 0xcb, 0x09, 0x26, 0xcf, 0x8d, 0xd7, 0x7c, 0x00, 0xe6, 0x18, 0xeb,
- 0x70, 0xd6, 0x8f, 0x73, 0x9a, 0x99, 0xa9, 0x39, 0x8d, 0x57, 0x89, 0x36, 0x43, 0x48, 0x28, 0x82,
- 0x9c, 0xc2, 0xa1, 0x8e, 0x7f, 0x1d, 0x39, 0xd4, 0x9f, 0x24, 0x30, 0xe7, 0x2f, 0xd3, 0x11, 0x90,
- 0xa8, 0x5b, 0x61, 0x12, 0x75, 0x2e, 0x43, 0x70, 0xa6, 0xb0, 0xa8, 0xcf, 0x0a, 0x41, 0xd7, 0x39,
- 0x8d, 0x5a, 0x61, 0x47, 0x30, 0x53, 0x55, 0xba, 0xd8, 0x16, 0xf5, 0xf6, 0x84, 0x73, 0xfc, 0x72,
- 0xda, 0x90, 0x27, 0x0d, 0x11, 0xae, 0xdc, 0xf3, 0x25, 0x5c, 0xf9, 0x67, 0x43, 0xb8, 0x7e, 0x04,
- 0x4a, 0xb6, 0x4b, 0xb5, 0x0a, 0x1c, 0xf2, 0x52, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8,
- 0x95, 0x07, 0x97, 0xc4, 0xac, 0x8a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a,
- 0x3c, 0xa9, 0x4a, 0x7e, 0xa0, 0xb7, 0x79, 0x2b, 0x12, 0x52, 0xb8, 0x0b, 0x96, 0x4d, 0xcb, 0xe8,
- 0x5b, 0xc4, 0xb6, 0x37, 0x08, 0xee, 0xa9, 0x8a, 0x4e, 0xdc, 0x01, 0x38, 0x35, 0xf1, 0xcc, 0x78,
- 0x54, 0x5f, 0x6e, 0x27, 0xab, 0xa0, 0x34, 0x5b, 0xf9, 0xaf, 0x05, 0x70, 0x32, 0xba, 0x37, 0xa6,
- 0xd0, 0x14, 0xe9, 0x50, 0x34, 0xe5, 0x72, 0x20, 0x4e, 0x1d, 0x0e, 0x17, 0xb8, 0x2a, 0x88, 0xc5,
- 0xea, 0x1a, 0x98, 0x17, 0xb4, 0xc4, 0x15, 0x0a, 0xa2, 0xe6, 0x2d, 0xcf, 0x6e, 0x58, 0x8c, 0xa2,
- 0xfa, 0xf0, 0x06, 0x98, 0xb5, 0x38, 0xf3, 0x72, 0x01, 0x1c, 0xf6, 0xf2, 0x1d, 0x01, 0x30, 0x8b,
- 0x82, 0x42, 0x14, 0xd6, 0x65, 0xcc, 0xc5, 0x27, 0x24, 0x2e, 0x40, 0x21, 0xcc, 0x5c, 0xd6, 0xa2,
- 0x0a, 0x28, 0x6e, 0x03, 0xb7, 0xc0, 0xc2, 0x40, 0x8f, 0x43, 0x39, 0xb1, 0x76, 0x46, 0x40, 0x2d,
- 0xec, 0xc6, 0x55, 0x50, 0x92, 0x1d, 0xbc, 0x17, 0x22, 0x33, 0x33, 0x7c, 0x3f, 0xb9, 0x9c, 0x21,
- 0x27, 0x32, 0xb3, 0x99, 0x04, 0xaa, 0x55, 0xca, 0x4a, 0xb5, 0xe4, 0x8f, 0x24, 0x00, 0xe3, 0x79,
- 0x38, 0xf1, 0x26, 0x20, 0x66, 0x11, 0xa8, 0x98, 0x4a, 0x32, 0xff, 0xb9, 0x9a, 0x91, 0xff, 0xf8,
- 0x1b, 0x6a, 0x36, 0x02, 0x24, 0x26, 0xfa, 0x68, 0x2e, 0x75, 0xb2, 0x12, 0x20, 0xdf, 0xa9, 0x67,
- 0x40, 0x80, 0x02, 0x60, 0x4f, 0x27, 0x40, 0xff, 0xcc, 0x81, 0x05, 0x5f, 0x39, 0x33, 0x01, 0x4a,
- 0x30, 0xf9, 0xf6, 0x62, 0x27, 0x1b, 0x29, 0xf1, 0xa7, 0xee, 0xff, 0x89, 0x94, 0xf8, 0x5e, 0xa5,
- 0x90, 0x92, 0xdf, 0xe7, 0x82, 0xae, 0x4f, 0x49, 0x4a, 0x9e, 0xc1, 0x0d, 0xc7, 0xd7, 0x8e, 0xd7,
- 0xc8, 0x9f, 0xe4, 0xc1, 0xc9, 0x68, 0x1e, 0x86, 0x0a, 0xa4, 0x34, 0xb1, 0x40, 0xb6, 0xc1, 0xe2,
- 0xfd, 0x81, 0xaa, 0x0e, 0xf9, 0x18, 0x02, 0x55, 0xd2, 0x29, 0xad, 0xdf, 0x15, 0x96, 0x8b, 0x3f,
- 0x4c, 0xd0, 0x41, 0x89, 0x96, 0xf1, 0x7a, 0x59, 0xf8, 0xb2, 0xf5, 0xb2, 0x78, 0x88, 0x7a, 0x99,
- 0x4c, 0x39, 0xf2, 0x87, 0xa2, 0x1c, 0xd3, 0x15, 0xcb, 0x84, 0x8d, 0x6b, 0xe2, 0xd1, 0x7f, 0x2c,
- 0x81, 0xa5, 0xe4, 0x03, 0x37, 0x54, 0xc1, 0x9c, 0x86, 0x1f, 0x06, 0x2f, 0x3e, 0x26, 0x15, 0x91,
- 0x01, 0x55, 0xd4, 0x86, 0xf3, 0x64, 0xd4, 0xb8, 0xad, 0xd3, 0x1d, 0xab, 0x43, 0x2d, 0x45, 0xef,
- 0x3b, 0x95, 0x77, 0x2b, 0x84, 0x85, 0x22, 0xd8, 0xf0, 0x7d, 0x50, 0xd2, 0xf0, 0xc3, 0xce, 0xc0,
- 0xea, 0x27, 0x55, 0xc8, 0x6c, 0xfd, 0xf0, 0x04, 0xd8, 0x12, 0x28, 0xc8, 0xc3, 0x93, 0xbf, 0x90,
- 0xc0, 0x72, 0x4a, 0x55, 0xfd, 0x06, 0x8d, 0xf2, 0x2f, 0x12, 0x38, 0x1b, 0x1a, 0x25, 0x4b, 0x4b,
- 0x72, 0x7f, 0xa0, 0xf2, 0x0c, 0x15, 0x4c, 0xe6, 0x12, 0x28, 0x9b, 0xd8, 0xa2, 0x8a, 0xc7, 0x83,
- 0x8b, 0xad, 0xd9, 0xf1, 0xa8, 0x5e, 0x6e, 0xbb, 0x8d, 0xc8, 0x97, 0x27, 0xcc, 0x4d, 0xee, 0xf9,
- 0xcd, 0x8d, 0xfc, 0x5f, 0x09, 0x14, 0x3b, 0x5d, 0xac, 0x92, 0x23, 0x20, 0x2e, 0x1b, 0x21, 0xe2,
- 0x92, 0xfe, 0x28, 0xc0, 0xfd, 0x49, 0xe5, 0x2c, 0x9b, 0x11, 0xce, 0x72, 0x7e, 0x02, 0xce, 0xd3,
- 0xe9, 0xca, 0x1b, 0xa0, 0xec, 0x75, 0x37, 0xdd, 0x5e, 0x2a, 0xff, 0x2e, 0x07, 0x2a, 0x81, 0x2e,
- 0xa6, 0xdc, 0x89, 0xef, 0x85, 0xca, 0x0f, 0xdb, 0x63, 0x56, 0xb3, 0x0c, 0xa4, 0xe1, 0x96, 0x9a,
- 0xb7, 0x75, 0x6a, 0x05, 0xcf, 0xaa, 0xf1, 0x0a, 0xf4, 0x26, 0x98, 0xa3, 0xd8, 0xea, 0x13, 0xea,
- 0xca, 0xf8, 0x84, 0x95, 0xfd, 0xbb, 0x9b, 0x3b, 0x21, 0x29, 0x8a, 0x68, 0x9f, 0xbe, 0x01, 0x66,
- 0x43, 0x9d, 0xc1, 0x93, 0x20, 0xff, 0x80, 0x0c, 0x1d, 0x06, 0x87, 0xd8, 0x4f, 0xb8, 0x08, 0x8a,
- 0x07, 0x58, 0x1d, 0x38, 0x21, 0x5a, 0x46, 0xce, 0xc7, 0xf5, 0xdc, 0xeb, 0x92, 0xfc, 0x6b, 0x36,
- 0x39, 0x7e, 0x2a, 0x1c, 0x41, 0x74, 0xbd, 0x13, 0x8a, 0xae, 0xf4, 0xf7, 0xc9, 0x60, 0x82, 0xa6,
- 0xc5, 0x18, 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xc5, 0x80,
- 0xb6, 0xcf, 0x8c, 0xbf, 0x1f, 0x62, 0xc6, 0x2b, 0x11, 0x66, 0x5c, 0x4d, 0xb2, 0xf9, 0x96, 0x1a,
- 0x4f, 0xa6, 0xc6, 0x7f, 0x96, 0xc0, 0x7c, 0x60, 0xee, 0x8e, 0x80, 0x1b, 0xdf, 0x0e, 0x73, 0xe3,
- 0xf3, 0x59, 0x82, 0x26, 0x85, 0x1c, 0x5f, 0x07, 0x0b, 0x01, 0xa5, 0x1d, 0xab, 0xa7, 0xe8, 0x58,
- 0xb5, 0xe1, 0x39, 0x50, 0xb4, 0x29, 0xb6, 0xa8, 0x5b, 0x44, 0x5c, 0xdb, 0x0e, 0x6b, 0x44, 0x8e,
- 0x4c, 0xfe, 0xb7, 0x04, 0x9a, 0x01, 0xe3, 0x36, 0xb1, 0x6c, 0xc5, 0xa6, 0x44, 0xa7, 0x77, 0x0d,
- 0x75, 0xa0, 0x91, 0x75, 0x15, 0x2b, 0x1a, 0x22, 0xac, 0x41, 0x31, 0xf4, 0xb6, 0xa1, 0x2a, 0xdd,
- 0x21, 0xc4, 0xa0, 0xf2, 0xe1, 0x3e, 0xd1, 0x37, 0x88, 0x4a, 0xa8, 0x78, 0x81, 0x2b, 0xb7, 0xde,
- 0x72, 0x1f, 0xa4, 0xde, 0xf3, 0x45, 0x4f, 0x46, 0xf5, 0x95, 0x2c, 0x88, 0x3c, 0x42, 0x83, 0x98,
- 0xf0, 0xa7, 0x00, 0xb0, 0x4f, 0xbe, 0x97, 0xf5, 0x44, 0xb0, 0xbe, 0xe9, 0x66, 0xf4, 0x7b, 0x9e,
- 0x64, 0xaa, 0x0e, 0x02, 0x88, 0xf2, 0x1f, 0x4a, 0xa1, 0xf5, 0xfe, 0xc6, 0xdf, 0x72, 0xfe, 0x1c,
- 0x2c, 0x1e, 0xf8, 0xb3, 0xe3, 0x2a, 0x30, 0xfe, 0x9d, 0x8f, 0x9e, 0xe4, 0x3d, 0xf8, 0xa4, 0x79,
- 0xf5, 0x59, 0xff, 0xdd, 0x04, 0x38, 0x94, 0xd8, 0x09, 0x7c, 0x15, 0x54, 0x18, 0x6f, 0x56, 0xba,
- 0x64, 0x1b, 0x6b, 0x6e, 0x2e, 0x7a, 0x0f, 0x98, 0x1d, 0x5f, 0x84, 0x82, 0x7a, 0x70, 0x1f, 0x2c,
- 0x98, 0x46, 0x6f, 0x0b, 0xeb, 0xb8, 0x4f, 0x18, 0x11, 0x74, 0x96, 0x92, 0x5f, 0x7d, 0x96, 0x5b,
- 0xaf, 0xb9, 0xd7, 0x5a, 0xed, 0xb8, 0xca, 0x93, 0x51, 0x7d, 0x39, 0xa1, 0x99, 0x07, 0x41, 0x12,
- 0x24, 0xb4, 0x62, 0x8f, 0xee, 0xce, 0xa3, 0xc3, 0x6a, 0x96, 0xa4, 0x3c, 0xe4, 0xb3, 0x7b, 0xda,
- 0xcd, 0x6e, 0xe9, 0x50, 0x37, 0xbb, 0x09, 0x47, 0xdc, 0xf2, 0x94, 0x47, 0xdc, 0x4f, 0x24, 0x70,
- 0xde, 0xcc, 0x90, 0x4b, 0x55, 0xc0, 0xe7, 0xe6, 0x56, 0x96, 0xb9, 0xc9, 0x92, 0x9b, 0xad, 0x95,
- 0xf1, 0xa8, 0x7e, 0x3e, 0x8b, 0x26, 0xca, 0xe4, 0x1f, 0xbc, 0x0b, 0x4a, 0x86, 0xd8, 0x03, 0xab,
- 0x15, 0xee, 0xeb, 0xe5, 0x2c, 0xbe, 0xba, 0xfb, 0xa6, 0x93, 0x96, 0xee, 0x17, 0xf2, 0xb0, 0xe4,
- 0x8f, 0x8a, 0xe0, 0x54, 0xac, 0x82, 0x7f, 0x85, 0xf7, 0xd7, 0xb1, 0xc3, 0x74, 0x7e, 0x8a, 0xc3,
- 0xf4, 0x1a, 0x98, 0x17, 0x7f, 0x89, 0x88, 0x9c, 0xc5, 0xbd, 0x80, 0x59, 0x0f, 0x8b, 0x51, 0x54,
- 0x3f, 0xe9, 0xfe, 0xbc, 0x38, 0xe5, 0xfd, 0x79, 0xd0, 0x0b, 0xf1, 0x17, 0x3f, 0x27, 0xbd, 0xe3,
- 0x5e, 0x88, 0x7f, 0xfa, 0x45, 0xf5, 0x19, 0x71, 0x75, 0x50, 0x3d, 0x84, 0xe3, 0x61, 0xe2, 0xba,
- 0x1b, 0x92, 0xa2, 0x88, 0xf6, 0x97, 0x7a, 0xf6, 0xc7, 0x09, 0xcf, 0xfe, 0x57, 0xb2, 0xc4, 0x5a,
- 0xf6, 0xab, 0xf2, 0xc4, 0x4b, 0x8f, 0xca, 0xf4, 0x97, 0x1e, 0xf2, 0xdf, 0x24, 0xf0, 0x42, 0xea,
- 0xae, 0x05, 0xd7, 0x42, 0xb4, 0xf2, 0x4a, 0x84, 0x56, 0x7e, 0x2f, 0xd5, 0x30, 0xc0, 0x2d, 0xad,
- 0xe4, 0x5b, 0xf4, 0x37, 0xb2, 0xdd, 0xa2, 0x27, 0x9c, 0x84, 0x27, 0x5f, 0xa7, 0xb7, 0x7e, 0xf0,
- 0xe8, 0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d,
- 0x7a, 0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa,
- 0xcd, 0x17, 0xb5, 0x63, 0xef, 0x2f, 0xa7, 0xfc, 0xe9, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
- 0xa4, 0x79, 0xcd, 0x52, 0x8e, 0x2c, 0x00, 0x00,
+ 0x15, 0xf7, 0x92, 0xa2, 0x44, 0x0e, 0x2d, 0xc9, 0x1e, 0xa9, 0x22, 0x63, 0xb7, 0xa4, 0xb1, 0x36,
+ 0x6c, 0x25, 0xb6, 0x49, 0x5b, 0xf9, 0x40, 0x62, 0xb7, 0x09, 0x44, 0x29, 0xb5, 0x1d, 0x48, 0x32,
+ 0x33, 0xb4, 0x1c, 0x34, 0xe8, 0x87, 0x47, 0xe4, 0x98, 0xda, 0x78, 0xbf, 0xb0, 0x3b, 0x54, 0x4c,
+ 0xf4, 0xd2, 0x6b, 0x81, 0x16, 0x6d, 0xae, 0xfd, 0x27, 0x8a, 0x5e, 0x8a, 0xa2, 0x41, 0x6f, 0x41,
+ 0xe1, 0x63, 0xd0, 0x4b, 0x72, 0x22, 0x6a, 0xe6, 0x54, 0x14, 0xbd, 0xb5, 0x17, 0x03, 0x05, 0x8a,
+ 0x99, 0x9d, 0xfd, 0xde, 0x35, 0x97, 0x8a, 0xad, 0x34, 0x41, 0x6e, 0xdc, 0x79, 0xef, 0xfd, 0xe6,
+ 0xcd, 0xcc, 0x7b, 0xf3, 0x7e, 0xfb, 0xb8, 0xe0, 0xc2, 0x83, 0xd7, 0xed, 0x86, 0x62, 0x34, 0xb1,
+ 0xa9, 0x34, 0xb1, 0x69, 0xda, 0xcd, 0x83, 0xab, 0x7b, 0x84, 0xe2, 0xb5, 0x66, 0x9f, 0xe8, 0xc4,
+ 0xc2, 0x94, 0xf4, 0x1a, 0xa6, 0x65, 0x50, 0x03, 0x56, 0x1c, 0xc5, 0x06, 0x36, 0x95, 0x06, 0x53,
+ 0x6c, 0x08, 0xc5, 0x53, 0x97, 0xfb, 0x0a, 0xdd, 0x1f, 0xec, 0x35, 0xba, 0x86, 0xd6, 0xec, 0x1b,
+ 0x7d, 0xa3, 0xc9, 0xf5, 0xf7, 0x06, 0xf7, 0xf9, 0x13, 0x7f, 0xe0, 0xbf, 0x1c, 0x9c, 0x53, 0x72,
+ 0x60, 0xc2, 0xae, 0x61, 0x91, 0xe6, 0xc1, 0xd5, 0xe8, 0x5c, 0xa7, 0x5e, 0xf1, 0x75, 0x34, 0xdc,
+ 0xdd, 0x57, 0x74, 0x62, 0x0d, 0x9b, 0xe6, 0x83, 0x3e, 0x1b, 0xb0, 0x9b, 0x1a, 0xa1, 0x38, 0xc9,
+ 0xaa, 0x99, 0x66, 0x65, 0x0d, 0x74, 0xaa, 0x68, 0x24, 0x66, 0xf0, 0xda, 0x24, 0x03, 0xbb, 0xbb,
+ 0x4f, 0x34, 0x1c, 0xb3, 0x7b, 0x39, 0xcd, 0x6e, 0x40, 0x15, 0xb5, 0xa9, 0xe8, 0xd4, 0xa6, 0x56,
+ 0xd4, 0x48, 0xfe, 0x8f, 0x04, 0xe0, 0x86, 0xa1, 0x53, 0xcb, 0x50, 0x55, 0x62, 0x21, 0x72, 0xa0,
+ 0xd8, 0x8a, 0xa1, 0xc3, 0x7b, 0xa0, 0xc8, 0xd6, 0xd3, 0xc3, 0x14, 0x57, 0xa5, 0x33, 0xd2, 0x6a,
+ 0x79, 0xed, 0x4a, 0xc3, 0xdf, 0x69, 0x0f, 0xbe, 0x61, 0x3e, 0xe8, 0xb3, 0x01, 0xbb, 0xc1, 0xb4,
+ 0x1b, 0x07, 0x57, 0x1b, 0xb7, 0xf7, 0x3e, 0x20, 0x5d, 0xba, 0x4d, 0x28, 0x6e, 0xc1, 0x47, 0xa3,
+ 0xfa, 0xb1, 0xf1, 0xa8, 0x0e, 0xfc, 0x31, 0xe4, 0xa1, 0xc2, 0xdb, 0x60, 0x86, 0xa3, 0xe7, 0x38,
+ 0xfa, 0xe5, 0x54, 0x74, 0xb1, 0xe8, 0x06, 0xc2, 0x1f, 0xbe, 0xfd, 0x90, 0x12, 0x9d, 0xb9, 0xd7,
+ 0x3a, 0x2e, 0xa0, 0x67, 0x36, 0x31, 0xc5, 0x88, 0x03, 0xc1, 0x4b, 0xa0, 0x68, 0x09, 0xf7, 0xab,
+ 0xf9, 0x33, 0xd2, 0x6a, 0xbe, 0x75, 0x42, 0x68, 0x15, 0xdd, 0x65, 0x21, 0x4f, 0x43, 0x7e, 0x24,
+ 0x81, 0x95, 0xf8, 0xba, 0xb7, 0x14, 0x9b, 0xc2, 0x1f, 0xc7, 0xd6, 0xde, 0xc8, 0xb6, 0x76, 0x66,
+ 0xcd, 0x57, 0xee, 0x4d, 0xec, 0x8e, 0x04, 0xd6, 0xdd, 0x06, 0x05, 0x85, 0x12, 0xcd, 0xae, 0xe6,
+ 0xce, 0xe4, 0x57, 0xcb, 0x6b, 0x17, 0x1b, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x17, 0xb8,
+ 0x85, 0x5b, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xb4, 0x89, 0x89, 0x66, 0xe8, 0x1d,
+ 0x42, 0x8f, 0xe0, 0xe4, 0x6e, 0x82, 0x19, 0xdb, 0x24, 0x5d, 0x71, 0x72, 0xe7, 0x53, 0x17, 0xe0,
+ 0xf9, 0xd4, 0x31, 0x49, 0xd7, 0x3f, 0x32, 0xf6, 0x84, 0x38, 0x02, 0x6c, 0x83, 0x59, 0x9b, 0x62,
+ 0x3a, 0xb0, 0xf9, 0x81, 0x95, 0xd7, 0x56, 0x33, 0x60, 0x71, 0xfd, 0xd6, 0x82, 0x40, 0x9b, 0x75,
+ 0x9e, 0x91, 0xc0, 0x91, 0xff, 0x91, 0x03, 0xd0, 0xd3, 0xdd, 0x30, 0xf4, 0x9e, 0x42, 0x59, 0x38,
+ 0x5f, 0x03, 0x33, 0x74, 0x68, 0x12, 0xbe, 0x21, 0xa5, 0xd6, 0x79, 0xd7, 0x95, 0x3b, 0x43, 0x93,
+ 0x3c, 0x19, 0xd5, 0x57, 0xe2, 0x16, 0x4c, 0x82, 0xb8, 0x0d, 0xdc, 0xf2, 0x9c, 0xcc, 0x71, 0xeb,
+ 0x57, 0xc2, 0x53, 0x3f, 0x19, 0xd5, 0x13, 0xee, 0x8e, 0x86, 0x87, 0x14, 0x76, 0x10, 0x1e, 0x00,
+ 0xa8, 0x62, 0x9b, 0xde, 0xb1, 0xb0, 0x6e, 0x3b, 0x33, 0x29, 0x1a, 0x11, 0xcb, 0x7f, 0x29, 0xdb,
+ 0x41, 0x31, 0x8b, 0xd6, 0x29, 0xe1, 0x05, 0xdc, 0x8a, 0xa1, 0xa1, 0x84, 0x19, 0xe0, 0x79, 0x30,
+ 0x6b, 0x11, 0x6c, 0x1b, 0x7a, 0x75, 0x86, 0xaf, 0xc2, 0xdb, 0x40, 0xc4, 0x47, 0x91, 0x90, 0xc2,
+ 0x17, 0xc1, 0x9c, 0x46, 0x6c, 0x1b, 0xf7, 0x49, 0xb5, 0xc0, 0x15, 0x17, 0x85, 0xe2, 0xdc, 0xb6,
+ 0x33, 0x8c, 0x5c, 0xb9, 0xfc, 0x47, 0x09, 0xcc, 0x7b, 0x3b, 0x77, 0x04, 0x99, 0x73, 0x23, 0x9c,
+ 0x39, 0xf2, 0xe4, 0x60, 0x49, 0x49, 0x98, 0x4f, 0xf2, 0x01, 0xc7, 0x59, 0x38, 0xc2, 0x9f, 0x80,
+ 0xa2, 0x4d, 0x54, 0xd2, 0xa5, 0x86, 0x25, 0x1c, 0x7f, 0x39, 0xa3, 0xe3, 0x78, 0x8f, 0xa8, 0x1d,
+ 0x61, 0xda, 0x3a, 0xce, 0x3c, 0x77, 0x9f, 0x90, 0x07, 0x09, 0xdf, 0x05, 0x45, 0x4a, 0x34, 0x53,
+ 0xc5, 0x94, 0x88, 0xac, 0x39, 0x1b, 0x74, 0x9e, 0xc5, 0x0c, 0x03, 0x6b, 0x1b, 0xbd, 0x3b, 0x42,
+ 0x8d, 0xa7, 0x8c, 0xb7, 0x19, 0xee, 0x28, 0xf2, 0x60, 0xa0, 0x09, 0x16, 0x06, 0x66, 0x8f, 0x69,
+ 0x52, 0x76, 0x9d, 0xf7, 0x87, 0x22, 0x86, 0xae, 0x4c, 0xde, 0x95, 0xdd, 0x90, 0x5d, 0x6b, 0x45,
+ 0xcc, 0xb2, 0x10, 0x1e, 0x47, 0x11, 0x7c, 0xb8, 0x0e, 0x16, 0x35, 0x45, 0x47, 0x04, 0xf7, 0x86,
+ 0x1d, 0xd2, 0x35, 0xf4, 0x9e, 0xcd, 0x43, 0xa9, 0xd0, 0xaa, 0x08, 0x80, 0xc5, 0xed, 0xb0, 0x18,
+ 0x45, 0xf5, 0xe1, 0x16, 0x58, 0x76, 0x2f, 0xe0, 0x9b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa5, 0x68,
+ 0x0a, 0xad, 0xce, 0x72, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x46, 0x09, 0x72, 0x94, 0x68, 0x25, 0x7f,
+ 0x34, 0x0b, 0x16, 0x23, 0xf7, 0x02, 0xbc, 0x0b, 0x56, 0xba, 0x03, 0xcb, 0x22, 0x3a, 0xdd, 0x19,
+ 0x68, 0x7b, 0xc4, 0xea, 0x74, 0xf7, 0x49, 0x6f, 0xa0, 0x92, 0x1e, 0x3f, 0xd6, 0x42, 0xab, 0x26,
+ 0x7c, 0x5d, 0xd9, 0x48, 0xd4, 0x42, 0x29, 0xd6, 0xf0, 0x1d, 0x00, 0x75, 0x3e, 0xb4, 0xad, 0xd8,
+ 0xb6, 0x87, 0x99, 0xe3, 0x98, 0x5e, 0x2a, 0xee, 0xc4, 0x34, 0x50, 0x82, 0x15, 0xf3, 0xb1, 0x47,
+ 0x6c, 0xc5, 0x22, 0xbd, 0xa8, 0x8f, 0xf9, 0xb0, 0x8f, 0x9b, 0x89, 0x5a, 0x28, 0xc5, 0x1a, 0xbe,
+ 0x0a, 0xca, 0xce, 0x6c, 0x7c, 0xcf, 0xc5, 0xe1, 0x2c, 0x09, 0xb0, 0xf2, 0x8e, 0x2f, 0x42, 0x41,
+ 0x3d, 0xb6, 0x34, 0x63, 0xcf, 0x26, 0xd6, 0x01, 0xe9, 0xdd, 0x70, 0xc8, 0x01, 0xab, 0xa0, 0x05,
+ 0x5e, 0x41, 0xbd, 0xa5, 0xdd, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xb6, 0x34, 0x27, 0x6a, 0x62, 0x4b,
+ 0x9b, 0x0d, 0x2f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3f, 0xc0,
+ 0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x9d, 0x0b, 0xc7, 0xde, 0x4e, 0x58, 0x8c, 0xa2, 0xfa, 0xf0, 0x06,
+ 0x38, 0xe9, 0x0c, 0xed, 0xea, 0xd8, 0x03, 0x29, 0x72, 0x90, 0x17, 0x04, 0xc8, 0xc9, 0x9d, 0xa8,
+ 0x02, 0x8a, 0xdb, 0xc0, 0x6b, 0x60, 0xa1, 0x6b, 0xa8, 0x2a, 0x8f, 0xc7, 0x0d, 0x63, 0xa0, 0xd3,
+ 0x6a, 0x89, 0xa3, 0x40, 0x96, 0x43, 0x1b, 0x21, 0x09, 0x8a, 0x68, 0xc2, 0x9f, 0x01, 0xd0, 0x75,
+ 0x0b, 0x83, 0x5d, 0x05, 0x13, 0x18, 0x40, 0xbc, 0x2c, 0xf9, 0x95, 0xd9, 0x1b, 0xb2, 0x51, 0x00,
+ 0x52, 0xfe, 0x44, 0x02, 0x95, 0x94, 0x44, 0x87, 0x6f, 0x85, 0x8a, 0xe0, 0xc5, 0x48, 0x11, 0x3c,
+ 0x9d, 0x62, 0x16, 0xa8, 0x84, 0xfb, 0x60, 0x9e, 0x11, 0x12, 0x45, 0xef, 0x3b, 0x2a, 0xe2, 0x2e,
+ 0x6b, 0xa6, 0x2e, 0x00, 0x05, 0xb5, 0xfd, 0x5b, 0xf9, 0xe4, 0x78, 0x54, 0x9f, 0x0f, 0xc9, 0x50,
+ 0x18, 0x58, 0xfe, 0x55, 0x0e, 0x80, 0x4d, 0x62, 0xaa, 0xc6, 0x50, 0x23, 0xfa, 0x51, 0x70, 0x9a,
+ 0x5b, 0x21, 0x4e, 0x73, 0x21, 0xfd, 0x48, 0x3c, 0xa7, 0x52, 0x49, 0xcd, 0xbb, 0x11, 0x52, 0xf3,
+ 0x62, 0x16, 0xb0, 0xa7, 0xb3, 0x9a, 0xcf, 0xf2, 0x60, 0xc9, 0x57, 0xf6, 0x69, 0xcd, 0xf5, 0xd0,
+ 0x89, 0x5e, 0x88, 0x9c, 0x68, 0x25, 0xc1, 0xe4, 0xb9, 0xf1, 0x9a, 0x0f, 0xc0, 0x02, 0x63, 0x1d,
+ 0xce, 0xf9, 0x71, 0x4e, 0x33, 0x3b, 0x35, 0xa7, 0xf1, 0x2a, 0xd1, 0x56, 0x08, 0x09, 0x45, 0x90,
+ 0x53, 0x38, 0xd4, 0xdc, 0xd7, 0x91, 0x43, 0xfd, 0x49, 0x02, 0x0b, 0xfe, 0x31, 0x1d, 0x01, 0x89,
+ 0xba, 0x19, 0x26, 0x51, 0x67, 0x33, 0x04, 0x67, 0x0a, 0x8b, 0xfa, 0x6c, 0x26, 0xe8, 0x3a, 0xa7,
+ 0x51, 0xab, 0xec, 0x15, 0xcc, 0x54, 0x95, 0x2e, 0xb6, 0x45, 0xbd, 0x3d, 0xee, 0xbc, 0x7e, 0x39,
+ 0x63, 0xc8, 0x93, 0x86, 0x08, 0x57, 0xee, 0xf9, 0x12, 0xae, 0xfc, 0xb3, 0x21, 0x5c, 0x3f, 0x02,
+ 0x45, 0xdb, 0xa5, 0x5a, 0x33, 0x1c, 0xf2, 0x62, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8,
+ 0x95, 0x07, 0x97, 0xc4, 0xac, 0x0a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a,
+ 0x3c, 0xa9, 0x8a, 0x7e, 0xa0, 0xb7, 0xf9, 0x28, 0x12, 0x52, 0xb8, 0x0b, 0x2a, 0xa6, 0x65, 0xf4,
+ 0x2d, 0x62, 0xdb, 0x9b, 0x04, 0xf7, 0x54, 0x45, 0x27, 0xee, 0x02, 0x9c, 0x9a, 0x78, 0x7a, 0x3c,
+ 0xaa, 0x57, 0xda, 0xc9, 0x2a, 0x28, 0xcd, 0x56, 0xfe, 0x75, 0x01, 0x9c, 0x88, 0xde, 0x8d, 0x29,
+ 0x34, 0x45, 0x3a, 0x14, 0x4d, 0xb9, 0x14, 0x88, 0x53, 0x87, 0xc3, 0x05, 0x5a, 0x05, 0xb1, 0x58,
+ 0x5d, 0x07, 0x8b, 0x82, 0x96, 0xb8, 0x42, 0x41, 0xd4, 0xbc, 0xe3, 0xd9, 0x0d, 0x8b, 0x51, 0x54,
+ 0x1f, 0x5e, 0x07, 0xf3, 0x16, 0x67, 0x5e, 0x2e, 0x80, 0xc3, 0x5e, 0xbe, 0x23, 0x00, 0xe6, 0x51,
+ 0x50, 0x88, 0xc2, 0xba, 0x8c, 0xb9, 0xf8, 0x84, 0xc4, 0x05, 0x98, 0x09, 0x33, 0x97, 0xf5, 0xa8,
+ 0x02, 0x8a, 0xdb, 0xc0, 0x6d, 0xb0, 0x34, 0xd0, 0xe3, 0x50, 0x4e, 0xac, 0x9d, 0x16, 0x50, 0x4b,
+ 0xbb, 0x71, 0x15, 0x94, 0x64, 0x07, 0x6f, 0x81, 0x25, 0x4a, 0x2c, 0x4d, 0xd1, 0x31, 0x55, 0xf4,
+ 0xbe, 0x07, 0xe7, 0x9c, 0x7c, 0x85, 0x41, 0xdd, 0x89, 0x8b, 0x51, 0x92, 0x0d, 0xbc, 0x17, 0xe2,
+ 0x45, 0xb3, 0xfc, 0x6a, 0xba, 0x94, 0x21, 0xbd, 0x32, 0x13, 0xa3, 0x04, 0xd6, 0x56, 0xcc, 0xca,
+ 0xda, 0xe4, 0x8f, 0x25, 0x00, 0xe3, 0x29, 0x3d, 0xb1, 0xa9, 0x10, 0xb3, 0x08, 0x14, 0x5f, 0x25,
+ 0x99, 0x4a, 0x5d, 0xc9, 0x48, 0xa5, 0xfc, 0xbb, 0x39, 0x1b, 0x97, 0x12, 0x1b, 0x7d, 0x34, 0xfd,
+ 0xa1, 0xac, 0x5c, 0xca, 0x77, 0xea, 0x19, 0x70, 0xa9, 0x00, 0xd8, 0xd3, 0xb9, 0xd4, 0x3f, 0x73,
+ 0x60, 0xc9, 0x57, 0xce, 0xcc, 0xa5, 0x12, 0x4c, 0xbe, 0xed, 0x11, 0x65, 0xe3, 0x37, 0xfe, 0xd6,
+ 0xfd, 0x3f, 0xf1, 0x1b, 0xdf, 0xab, 0x14, 0x7e, 0xf3, 0xfb, 0x5c, 0xd0, 0xf5, 0x29, 0xf9, 0xcd,
+ 0x33, 0x68, 0x96, 0x7c, 0xed, 0x28, 0x92, 0xfc, 0xd1, 0x0c, 0x38, 0x11, 0xcd, 0xc3, 0x50, 0xad,
+ 0x95, 0x26, 0xd6, 0xda, 0x36, 0x58, 0xbe, 0x3f, 0x50, 0xd5, 0x21, 0x5f, 0x43, 0xa0, 0xe0, 0x3a,
+ 0x55, 0xfa, 0xbb, 0xc2, 0x72, 0xf9, 0x87, 0x09, 0x3a, 0x28, 0xd1, 0x32, 0x5e, 0x7a, 0x67, 0xbe,
+ 0x6c, 0xe9, 0x2d, 0x1c, 0xa2, 0xf4, 0xa6, 0xd4, 0xca, 0xb9, 0x43, 0xd4, 0xca, 0x64, 0x22, 0x94,
+ 0x3f, 0x14, 0x11, 0x9a, 0xae, 0xee, 0x26, 0xdc, 0x81, 0x13, 0x1b, 0x12, 0x63, 0x09, 0xac, 0x24,
+ 0xb7, 0x01, 0xa0, 0x0a, 0x16, 0x34, 0xfc, 0x30, 0xd8, 0x8e, 0x99, 0x54, 0x8f, 0x06, 0x54, 0x51,
+ 0x1b, 0xce, 0x1f, 0x59, 0x8d, 0x5b, 0x3a, 0xbd, 0x6d, 0x75, 0xa8, 0xa5, 0xe8, 0x7d, 0xa7, 0x88,
+ 0x6f, 0x87, 0xb0, 0x50, 0x04, 0x1b, 0xbe, 0x0f, 0x8a, 0x1a, 0x7e, 0xd8, 0x19, 0x58, 0xfd, 0xa4,
+ 0x62, 0x9b, 0x6d, 0x1e, 0x9e, 0x4b, 0xdb, 0x02, 0x05, 0x79, 0x78, 0xf2, 0x17, 0x12, 0xa8, 0xa4,
+ 0x14, 0xe8, 0x6f, 0xd0, 0x2a, 0xff, 0x22, 0x81, 0x33, 0xa1, 0x55, 0xb2, 0x0c, 0x27, 0xf7, 0x07,
+ 0x2a, 0x4f, 0x76, 0x41, 0x8a, 0x2e, 0x82, 0x92, 0x89, 0x2d, 0xaa, 0x78, 0xec, 0xbc, 0xd0, 0x9a,
+ 0x1f, 0x8f, 0xea, 0xa5, 0xb6, 0x3b, 0x88, 0x7c, 0x79, 0xc2, 0xde, 0xe4, 0x9e, 0xdf, 0xde, 0xc8,
+ 0xff, 0x95, 0x40, 0xa1, 0xd3, 0xc5, 0x2a, 0x39, 0x02, 0x0e, 0xb4, 0x19, 0xe2, 0x40, 0xe9, 0x7f,
+ 0x55, 0x70, 0x7f, 0x52, 0xe9, 0xcf, 0x56, 0x84, 0xfe, 0x9c, 0x9b, 0x80, 0xf3, 0x74, 0xe6, 0xf3,
+ 0x06, 0x28, 0x79, 0xd3, 0x4d, 0x77, 0x2d, 0xcb, 0xbf, 0xcb, 0x81, 0x72, 0x60, 0x8a, 0x29, 0x2f,
+ 0xf5, 0x7b, 0xa1, 0x4a, 0xc6, 0xee, 0x98, 0xb5, 0x2c, 0x0b, 0x69, 0xb8, 0x55, 0xeb, 0x6d, 0x9d,
+ 0x5a, 0xc1, 0x37, 0xe8, 0x78, 0x31, 0x7b, 0x13, 0x2c, 0x50, 0x6c, 0xf5, 0x09, 0x75, 0x65, 0x7c,
+ 0xc3, 0x4a, 0x7e, 0x47, 0xe9, 0x4e, 0x48, 0x8a, 0x22, 0xda, 0xa7, 0xae, 0x83, 0xf9, 0xd0, 0x64,
+ 0xf0, 0x04, 0xc8, 0x3f, 0x20, 0x43, 0x87, 0x0c, 0x22, 0xf6, 0x13, 0x2e, 0x83, 0xc2, 0x01, 0x56,
+ 0x07, 0x4e, 0x88, 0x96, 0x90, 0xf3, 0x70, 0x2d, 0xf7, 0xba, 0x24, 0xff, 0x86, 0x6d, 0x8e, 0x9f,
+ 0x0a, 0x47, 0x10, 0x5d, 0xef, 0x84, 0xa2, 0x2b, 0xfd, 0x5f, 0xd3, 0x60, 0x82, 0xa6, 0xc5, 0x18,
+ 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xe5, 0x80, 0xb6, 0x4f,
+ 0xb2, 0xbf, 0x1f, 0x22, 0xd9, 0xab, 0x11, 0x92, 0x5d, 0x4d, 0xb2, 0xf9, 0x96, 0x65, 0x4f, 0x66,
+ 0xd9, 0x7f, 0x96, 0xc0, 0x62, 0x60, 0xef, 0x8e, 0x80, 0x66, 0xdf, 0x0a, 0xd3, 0xec, 0x73, 0x59,
+ 0x82, 0x26, 0x85, 0x67, 0x5f, 0x03, 0x4b, 0x01, 0xa5, 0xdb, 0x56, 0x4f, 0xd1, 0xb1, 0x6a, 0xc3,
+ 0xb3, 0xa0, 0x60, 0x53, 0x6c, 0x51, 0xb7, 0x88, 0xb8, 0xb6, 0x1d, 0x36, 0x88, 0x1c, 0x99, 0xfc,
+ 0x6f, 0x09, 0x34, 0x03, 0xc6, 0x6d, 0x62, 0xd9, 0x8a, 0x4d, 0x89, 0x4e, 0xef, 0x1a, 0xea, 0x40,
+ 0x23, 0x1b, 0x2a, 0x56, 0x34, 0x44, 0xd8, 0x80, 0x62, 0xe8, 0x6d, 0x43, 0x55, 0xba, 0x43, 0x88,
+ 0x41, 0xf9, 0xc3, 0x7d, 0xa2, 0x6f, 0x12, 0x95, 0x50, 0xf1, 0xbf, 0x60, 0xa9, 0xf5, 0x96, 0xfb,
+ 0x37, 0xd9, 0x7b, 0xbe, 0xe8, 0xc9, 0xa8, 0xbe, 0x9a, 0x05, 0x91, 0x47, 0x68, 0x10, 0x13, 0xfe,
+ 0x14, 0x00, 0xf6, 0xc8, 0xef, 0xb2, 0x9e, 0x08, 0xd6, 0x37, 0xdd, 0x8c, 0x7e, 0xcf, 0x93, 0x4c,
+ 0x35, 0x41, 0x00, 0x51, 0xfe, 0x43, 0x31, 0x74, 0xde, 0xdf, 0xf8, 0xde, 0xeb, 0xcf, 0xc1, 0xf2,
+ 0x81, 0xbf, 0x3b, 0xae, 0x02, 0xa3, 0xf2, 0xf9, 0x68, 0x53, 0xc0, 0x83, 0x4f, 0xda, 0x57, 0xff,
+ 0x05, 0xe2, 0x6e, 0x02, 0x1c, 0x4a, 0x9c, 0x04, 0xbe, 0x0a, 0xca, 0x8c, 0x37, 0x2b, 0x5d, 0xb2,
+ 0x83, 0x35, 0x37, 0x17, 0xbd, 0xbf, 0x55, 0x3b, 0xbe, 0x08, 0x05, 0xf5, 0xe0, 0x3e, 0x58, 0x32,
+ 0x8d, 0xde, 0x36, 0xd6, 0x71, 0x9f, 0x30, 0x22, 0xe8, 0x1c, 0x25, 0x6f, 0xc8, 0x96, 0x5a, 0xaf,
+ 0xb9, 0xcd, 0xb6, 0x76, 0x5c, 0xe5, 0xc9, 0xa8, 0x5e, 0x49, 0x18, 0xe6, 0x41, 0x90, 0x04, 0x09,
+ 0xad, 0xd8, 0xa7, 0x00, 0xce, 0x5f, 0x21, 0x6b, 0x59, 0x92, 0xf2, 0x90, 0x1f, 0x03, 0xa4, 0xf5,
+ 0x9b, 0x8b, 0x87, 0xea, 0x37, 0x27, 0xbc, 0x2d, 0x97, 0xa6, 0x7c, 0x5b, 0xfe, 0xab, 0x04, 0xce,
+ 0x99, 0x19, 0x72, 0xa9, 0x0a, 0xf8, 0xde, 0xdc, 0xcc, 0xb2, 0x37, 0x59, 0x72, 0xb3, 0xb5, 0x3a,
+ 0x1e, 0xd5, 0xcf, 0x65, 0xd1, 0x44, 0x99, 0xfc, 0x83, 0x77, 0x41, 0xd1, 0x10, 0x77, 0x60, 0xb5,
+ 0xcc, 0x7d, 0xbd, 0x94, 0xc5, 0x57, 0xf7, 0xde, 0x74, 0xd2, 0xd2, 0x7d, 0x42, 0x1e, 0x96, 0xfc,
+ 0x71, 0x01, 0x9c, 0x8c, 0x55, 0xf0, 0xaf, 0xb0, 0xab, 0x1e, 0x7b, 0x2f, 0xcf, 0x4f, 0xf1, 0x5e,
+ 0xbe, 0x0e, 0x16, 0xc5, 0x87, 0x1a, 0x91, 0xd7, 0x7a, 0x2f, 0x60, 0x36, 0xc2, 0x62, 0x14, 0xd5,
+ 0x4f, 0xea, 0xea, 0x17, 0xa6, 0xec, 0xea, 0x07, 0xbd, 0x10, 0x1f, 0x1e, 0x3a, 0xe9, 0x1d, 0xf7,
+ 0x42, 0x7c, 0x7f, 0x18, 0xd5, 0x67, 0xc4, 0xd5, 0x41, 0xf5, 0x10, 0xe6, 0xc2, 0xc4, 0x75, 0x37,
+ 0x24, 0x45, 0x11, 0xed, 0x2f, 0xf5, 0x31, 0x02, 0x4e, 0xf8, 0x18, 0xe1, 0x72, 0x96, 0x58, 0xcb,
+ 0xde, 0x75, 0x4f, 0xec, 0x9f, 0x94, 0xa7, 0xef, 0x9f, 0xc8, 0x7f, 0x93, 0xc0, 0x0b, 0xa9, 0xb7,
+ 0x16, 0x5c, 0x0f, 0xd1, 0xca, 0xcb, 0x11, 0x5a, 0xf9, 0xbd, 0x54, 0xc3, 0x00, 0xb7, 0xb4, 0x92,
+ 0x1b, 0xf2, 0x6f, 0x64, 0x6b, 0xc8, 0x27, 0xbc, 0x09, 0x4f, 0xee, 0xcc, 0xb7, 0x7e, 0xf0, 0xe8,
+ 0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d, 0x7a,
+ 0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa, 0xed,
+ 0x17, 0xb5, 0x63, 0xef, 0x57, 0x52, 0x3e, 0x85, 0xfe, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4,
+ 0x01, 0x82, 0xf5, 0x24, 0x2d, 0x00, 0x00,
}
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -1845,6 +1847,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.TerminatingReplicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+ i--
+ dAtA[i] = 0x48
+ }
if m.CollisionCount != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
i--
@@ -2151,6 +2158,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.TerminatingReplicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+ i--
+ dAtA[i] = 0x38
+ }
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
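
The hard-coded tag bytes written above follow the protobuf wire format: a tag is (field number << 3) | wire type, and both new fields are varints (wire type 0). So 0x48 encodes field 9, the next free tag in DeploymentStatus (collisionCount is 8), and 0x38 encodes field 7, the next free tag in ReplicaSetStatus (conditions is 6). A quick check with a hypothetical helper:

```go
package main

import "fmt"

// protoTag mirrors what the generated marshaller hard-codes:
// tag byte = (field number << 3) | wire type.
func protoTag(field, wire byte) byte { return field<<3 | wire }

func main() {
	fmt.Printf("%#x\n", protoTag(9, 0)) // 0x48: DeploymentStatus.terminatingReplicas
	fmt.Printf("%#x\n", protoTag(7, 0)) // 0x38: ReplicaSetStatus.terminatingReplicas
}
```
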
@@ -3146,6 +3158,9 @@ func (m *DeploymentStatus) Size() (n int) {
if m.CollisionCount != nil {
n += 1 + sovGenerated(uint64(*m.CollisionCount))
}
+ if m.TerminatingReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+ }
return n
}
@@ -3251,6 +3266,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.TerminatingReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+ }
return n
}
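
The Size() additions above count one tag byte plus the varint length of the value. `sovGenerated` is gogo's usual size helper; a behaviorally equivalent stand-in:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov is a stand-in for the generated sovGenerated: the number of bytes a
// value occupies as a base-128 varint, i.e. ceil(bitlen/7) with a 1-byte floor.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	fmt.Println(1 + sov(5))   // 2: one tag byte + one payload byte
	fmt.Println(1 + sov(300)) // 3: values >= 128 need a second payload byte
}
```
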
@@ -3711,6 +3729,7 @@ func (this *DeploymentStatus) String() string {
`Conditions:` + repeatedStringForConditions + `,`,
`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
`}`,
}, "")
return s
@@ -3797,6 +3816,7 @@ func (this *ReplicaSetStatus) String() string {
`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
+ `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
`}`,
}, "")
return s
@@ -6261,6 +6281,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
}
}
m.CollisionCount = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminatingReplicas = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -7193,6 +7233,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminatingReplicas = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
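
Both Unmarshal cases above inline the same base-128 varint decode: seven payload bits per byte, least-significant group first, with the high bit marking continuation. A standalone equivalent of that loop (illustrative name, not part of the vendored code):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inlined loop in the generated Unmarshal: each byte
// contributes its low 7 bits; a high bit of 0 marks the final byte.
func decodeVarint(data []byte) (v int32, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflow")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= int32(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 0b10_0101100 = 300
	fmt.Println(v, n, err)                        // 300 2 <nil>
}
```
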
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
index c08a4c78b..68c463e25 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -323,19 +323,19 @@ message DeploymentStatus {
// +optional
optional int64 observedGeneration = 1;
- // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
optional int32 replicas = 2;
- // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
optional int32 updatedReplicas = 3;
- // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
optional int32 readyReplicas = 7;
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
optional int32 availableReplicas = 4;
@@ -345,6 +345,13 @@ message DeploymentStatus {
// +optional
optional int32 unavailableReplicas = 5;
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ optional int32 terminatingReplicas = 9;
+
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
@@ -427,16 +434,16 @@ message ReplicaSetList {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of ReplicaSets.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
repeated ReplicaSet items = 2;
}
// ReplicaSetSpec is the specification of a ReplicaSet.
message ReplicaSetSpec {
- // Replicas is the number of desired replicas.
+ // Replicas is the number of desired pods.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
// +optional
optional int32 replicas = 1;
@@ -454,29 +461,36 @@ message ReplicaSetSpec {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
// +optional
optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
message ReplicaSetStatus {
- // Replicas is the most recently observed number of replicas.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
optional int32 replicas = 1;
- // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
// +optional
optional int32 fullyLabeledReplicas = 2;
- // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
// +optional
optional int32 readyReplicas = 4;
- // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
// +optional
optional int32 availableReplicas = 5;
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ optional int32 terminatingReplicas = 7;
+
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
optional int64 observedGeneration = 3;
@@ -747,6 +761,7 @@ message StatefulSetSpec {
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
+ // +optional
optional string serviceName = 5;
// podManagementPolicy controls how pods are created during initial scale up,
diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
index c2624a941..491afc59f 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -269,6 +269,7 @@ type StatefulSetSpec struct {
// the network identity of the set. Pods get DNS/hostnames that follow the
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
// where "pod-specific-string" is managed by the StatefulSet controller.
+ // +optional
ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
// podManagementPolicy controls how pods are created during initial scale up,
@@ -530,19 +531,19 @@ type DeploymentStatus struct {
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
- // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
- // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
- // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
@@ -552,6 +553,13 @@ type DeploymentStatus struct {
// +optional
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
+
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
@@ -897,16 +905,16 @@ type ReplicaSetList struct {
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ReplicaSets.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpec struct {
- // Replicas is the number of desired replicas.
+ // Replicas is the number of desired pods.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
@@ -924,29 +932,36 @@ type ReplicaSetSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
// +optional
Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatus struct {
- // Replicas is the most recently observed number of replicas.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
- // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
// +optional
FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
- // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
- // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
+
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
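
Because both new status fields are optional `*int32` values behind an alpha gate, callers have to treat a nil pointer as "not reported" rather than as zero. A consumer-side sketch (the gate name comes from the field comments above; the helper itself is illustrative):

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

// reportTerminating shows the nil check: a nil pointer means the server did
// not populate the field (DeploymentReplicaSetTerminatingReplicas gate off,
// or a server predating the field), which is distinct from a reported zero.
func reportTerminating(d *appsv1beta2.Deployment) {
	if d.Status.TerminatingReplicas == nil {
		fmt.Println("terminatingReplicas: not reported")
		return
	}
	fmt.Printf("terminatingReplicas: %d\n", *d.Status.TerminatingReplicas)
}

func main() {
	var d appsv1beta2.Deployment
	reportTerminating(&d) // not reported

	n := int32(2)
	d.Status.TerminatingReplicas = &n
	reportTerminating(&d) // terminatingReplicas: 2
}
```
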
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index beec4b755..408943415 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -177,11 +177,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
var map_DeploymentStatus = map[string]string{
"": "DeploymentStatus is the most recently observed status of the Deployment.",
"observedGeneration": "The generation observed by the deployment controller.",
- "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
- "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
- "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.",
- "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "replicas": "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
+ "readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
+ "availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
"conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
@@ -227,7 +228,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
var map_ReplicaSetList = map[string]string{
"": "ReplicaSetList is a collection of ReplicaSets.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+ "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
}
func (ReplicaSetList) SwaggerDoc() map[string]string {
@@ -236,10 +237,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
var map_ReplicaSetSpec = map[string]string{
"": "ReplicaSetSpec is the specification of a ReplicaSet.",
- "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "replicas": "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
"selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
- "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
}
func (ReplicaSetSpec) SwaggerDoc() map[string]string {
@@ -248,10 +249,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
var map_ReplicaSetStatus = map[string]string{
"": "ReplicaSetStatus represents the current status of a ReplicaSet.",
- "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
- "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
- "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.",
- "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+ "replicas": "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
+ "fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
+ "readyReplicas": "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
+ "availableReplicas": "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
+ "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
"observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
"conditions": "Represents the latest available observations of a replica set's current state.",
}
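
These generated maps ship with the types themselves, so the updated field documentation is queryable at runtime through the value-receiver SwaggerDoc() methods; for example:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	// SwaggerDoc() returns the generated map keyed by JSON field name.
	docs := appsv1beta2.ReplicaSetStatus{}.SwaggerDoc()
	fmt.Println(docs["terminatingReplicas"])
}
```
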
diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
index cd92792db..917ad4a22 100644
--- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
@@ -363,6 +363,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
+ if in.TerminatingReplicas != nil {
+ in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+ *out = new(int32)
+ **out = **in
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
@@ -517,6 +522,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
*out = *in
+ if in.TerminatingReplicas != nil {
+ in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+ *out = new(int32)
+ **out = **in
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicaSetCondition, len(*in))
diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go
index 3bdc89bad..dc3aed4e4 100644
--- a/vendor/k8s.io/api/authentication/v1/doc.go
+++ b/vendor/k8s.io/api/authentication/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1 // import "k8s.io/api/authentication/v1"
+package v1
diff --git a/vendor/k8s.io/api/authentication/v1alpha1/doc.go b/vendor/k8s.io/api/authentication/v1alpha1/doc.go
index eb32def90..c199ccd49 100644
--- a/vendor/k8s.io/api/authentication/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/authentication/v1alpha1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1alpha1 // import "k8s.io/api/authentication/v1alpha1"
+package v1alpha1
diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go
index 2a2b176e4..af63dc845 100644
--- a/vendor/k8s.io/api/authentication/v1beta1/doc.go
+++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1beta1 // import "k8s.io/api/authentication/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go
index 77e5a19c4..40bf8006e 100644
--- a/vendor/k8s.io/api/authorization/v1/doc.go
+++ b/vendor/k8s.io/api/authorization/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=authorization.k8s.io
-package v1 // import "k8s.io/api/authorization/v1"
+package v1
diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go
index c996e35cc..9f7332d49 100644
--- a/vendor/k8s.io/api/authorization/v1beta1/doc.go
+++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=authorization.k8s.io
-package v1beta1 // import "k8s.io/api/authorization/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go
index d64c9cbc1..4ee085e16 100644
--- a/vendor/k8s.io/api/autoscaling/v1/doc.go
+++ b/vendor/k8s.io/api/autoscaling/v1/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1 // import "k8s.io/api/autoscaling/v1"
+package v1
diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go
index aafa2d4de..8dea6339d 100644
--- a/vendor/k8s.io/api/autoscaling/v2/doc.go
+++ b/vendor/k8s.io/api/autoscaling/v2/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v2 // import "k8s.io/api/autoscaling/v2"
+package v2
diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
index ece6dedad..40b60ebec 100644
--- a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
+++ b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
@@ -751,115 +751,116 @@ func init() {
}
var fileDescriptor_4d5f2c8767749221 = []byte{
- // 1722 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x8f, 0x1b, 0x49,
- 0x19, 0x9f, 0xb6, 0x3d, 0xaf, 0xf2, 0x3c, 0x2b, 0x2f, 0x67, 0xa2, 0xd8, 0xa3, 0x26, 0x90, 0x07,
- 0xa4, 0x4d, 0x4c, 0x88, 0x22, 0x72, 0x40, 0xd3, 0x13, 0x20, 0xa3, 0xcc, 0x30, 0x4e, 0x39, 0xc9,
- 0x00, 0x02, 0x94, 0x72, 0x77, 0x8d, 0xa7, 0x18, 0xbb, 0xdb, 0xea, 0x6e, 0x3b, 0x99, 0x48, 0x48,
- 0x5c, 0xb8, 0x23, 0x50, 0x84, 0xf8, 0x1f, 0x22, 0x4e, 0xa0, 0x70, 0x00, 0x09, 0x69, 0xf7, 0x90,
- 0xcb, 0x4a, 0x39, 0xec, 0x21, 0x27, 0x6b, 0xe3, 0x95, 0xf6, 0xb8, 0x7f, 0x40, 0x4e, 0xab, 0x7a,
- 0xf4, 0xd3, 0xaf, 0x71, 0x76, 0x32, 0xd2, 0xdc, 0x5c, 0x55, 0xdf, 0xf7, 0xfb, 0x1e, 0xf5, 0xbd,
- 0xaa, 0x0d, 0xae, 0xee, 0xdf, 0x76, 0x35, 0x6a, 0x17, 0x71, 0x93, 0x16, 0x71, 0xcb, 0xb3, 0x5d,
- 0x03, 0xd7, 0xa9, 0x55, 0x2b, 0xb6, 0x4b, 0xc5, 0x1a, 0xb1, 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x4d,
- 0xc7, 0xf6, 0x6c, 0x78, 0x5e, 0x90, 0x6a, 0xb8, 0x49, 0xb5, 0x08, 0xa9, 0xd6, 0x2e, 0xad, 0x5c,
- 0xaf, 0x51, 0x6f, 0xaf, 0x55, 0xd5, 0x0c, 0xbb, 0x51, 0xac, 0xd9, 0x35, 0xbb, 0xc8, 0x39, 0xaa,
- 0xad, 0x5d, 0xbe, 0xe2, 0x0b, 0xfe, 0x4b, 0x20, 0xad, 0xa8, 0x11, 0xa1, 0x86, 0xed, 0x90, 0x62,
- 0xfb, 0x46, 0x52, 0xda, 0xca, 0xcd, 0x90, 0xa6, 0x81, 0x8d, 0x3d, 0x6a, 0x11, 0xe7, 0xa0, 0xd8,
- 0xdc, 0xaf, 0x71, 0x26, 0x87, 0xb8, 0x76, 0xcb, 0x31, 0xc8, 0x58, 0x5c, 0x6e, 0xb1, 0x41, 0x3c,
- 0xdc, 0x4f, 0x56, 0x71, 0x10, 0x97, 0xd3, 0xb2, 0x3c, 0xda, 0xe8, 0x15, 0x73, 0x6b, 0x14, 0x83,
- 0x6b, 0xec, 0x91, 0x06, 0x4e, 0xf2, 0xa9, 0x5f, 0x29, 0xe0, 0xe2, 0xba, 0x6d, 0x79, 0x98, 0x71,
- 0x20, 0x69, 0xc4, 0x16, 0xf1, 0x1c, 0x6a, 0x54, 0xf8, 0x6f, 0xb8, 0x0e, 0x32, 0x16, 0x6e, 0x90,
- 0x9c, 0xb2, 0xaa, 0x5c, 0x99, 0xd5, 0x8b, 0xaf, 0x3b, 0x85, 0x89, 0x6e, 0xa7, 0x90, 0xf9, 0x25,
- 0x6e, 0x90, 0xf7, 0x9d, 0x42, 0xa1, 0xd7, 0x71, 0x9a, 0x0f, 0xc3, 0x48, 0x10, 0x67, 0x86, 0xdb,
- 0x60, 0xca, 0xc3, 0x4e, 0x8d, 0x78, 0xb9, 0xd4, 0xaa, 0x72, 0x25, 0x5b, 0xba, 0xac, 0x0d, 0xbc,
- 0x3a, 0x4d, 0x48, 0x7f, 0xc8, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45,
- 0x30, 0x6b, 0xf8, 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8,
- 0x5f, 0x0f, 0x31, 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1a, 0x43, 0x77, 0xc0, 0xb4, 0xd1, 0x72, 0x1c,
- 0x62, 0xf9, 0x96, 0xfe, 0x60, 0xa4, 0xa5, 0x8f, 0x71, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94,
- 0x3a, 0xbd, 0x2e, 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x14, 0x70, 0x61, 0xdd, 0xb1, 0x5d,
- 0xf7, 0x31, 0x71, 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x3f, 0x10, 0xc3, 0x43, 0x64, 0x97, 0x38, 0xc4,
- 0x32, 0x08, 0x5c, 0x05, 0x99, 0x7d, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0xfb, 0xd4, 0x32,
- 0x11, 0x3f, 0x61, 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5,
- 0x00, 0xa9, 0x15, 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xaf, 0x02,
- 0x4e, 0xff, 0xec, 0x99, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6,
- 0x2a, 0x65, 0x4b, 0xdf, 0x1f, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa5, 0xc4, 0x09, 0xe3,
- 0x44, 0x9c, 0x20, 0x09, 0x75, 0xe4, 0x81, 0xa7, 0x7e, 0xda, 0xab, 0xbe, 0x08, 0x9f, 0x8f, 0xa2,
- 0xfe, 0xc7, 0x0a, 0x27, 0xf5, 0x9f, 0x0a, 0x58, 0xba, 0x57, 0x5e, 0xab, 0x08, 0xee, 0xb2, 0x5d,
- 0xa7, 0xc6, 0x01, 0xbc, 0x0d, 0x32, 0xde, 0x41, 0xd3, 0xcf, 0x80, 0x4b, 0xfe, 0x85, 0x3f, 0x3c,
- 0x68, 0xb2, 0x0c, 0x38, 0x9d, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xef, 0x80, 0xc9, 0x36, 0x93,
- 0xcb, 0xb5, 0x9c, 0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x07, 0xcc, 0x37,
- 0x89, 0x43, 0x6d, 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x19, 0x49, 0x3c,
- 0x5f, 0x8e, 0x1e, 0xa2, 0x38, 0xad, 0xfa, 0x8f, 0x14, 0x58, 0x0c, 0x15, 0x40, 0xad, 0x3a, 0x71,
- 0xe1, 0xef, 0xc1, 0x8a, 0xeb, 0xe1, 0x2a, 0xad, 0xd3, 0xe7, 0xd8, 0xa3, 0xb6, 0xb5, 0x43, 0x2d,
- 0xd3, 0x7e, 0x1a, 0x47, 0xcf, 0x77, 0x3b, 0x85, 0x95, 0xca, 0x40, 0x2a, 0x34, 0x04, 0x01, 0xde,
- 0x07, 0x73, 0x2e, 0xa9, 0x13, 0xc3, 0x13, 0xf6, 0x4a, 0xbf, 0x5c, 0xee, 0x76, 0x0a, 0x73, 0x95,
- 0xc8, 0xfe, 0xfb, 0x4e, 0xe1, 0x54, 0xcc, 0x31, 0xe2, 0x10, 0xc5, 0x98, 0xe1, 0xaf, 0xc1, 0x4c,
- 0x93, 0xfd, 0xa2, 0xc4, 0xcd, 0xa5, 0x56, 0xd3, 0x23, 0x22, 0x24, 0xe9, 0x6b, 0x7d, 0x49, 0x7a,
- 0x69, 0xa6, 0x2c, 0x41, 0x50, 0x00, 0xa7, 0xbe, 0x4a, 0x81, 0x73, 0xf7, 0x6c, 0x87, 0x3e, 0x67,
- 0xc9, 0x5f, 0x2f, 0xdb, 0xe6, 0x9a, 0x04, 0x23, 0x0e, 0x7c, 0x02, 0x66, 0x58, 0x93, 0x31, 0xb1,
- 0x87, 0x65, 0x60, 0xfe, 0x30, 0x22, 0x36, 0xe8, 0x15, 0x5a, 0x73, 0xbf, 0xc6, 0x36, 0x5c, 0x8d,
- 0x51, 0x6b, 0xed, 0x1b, 0x9a, 0xa8, 0x17, 0x5b, 0xc4, 0xc3, 0x61, 0x4a, 0x87, 0x7b, 0x28, 0x40,
- 0x85, 0xbf, 0x02, 0x19, 0xb7, 0x49, 0x0c, 0x19, 0xa0, 0xb7, 0x86, 0x19, 0xd5, 0x5f, 0xc7, 0x4a,
- 0x93, 0x18, 0x61, 0x79, 0x61, 0x2b, 0xc4, 0x11, 0xe1, 0x13, 0x30, 0xe5, 0xf2, 0x40, 0xe6, 0x77,
- 0x99, 0x2d, 0xdd, 0xfe, 0x00, 0x6c, 0x91, 0x08, 0x41, 0x7e, 0x89, 0x35, 0x92, 0xb8, 0xea, 0x67,
- 0x0a, 0x28, 0x0c, 0xe0, 0xd4, 0xc9, 0x1e, 0x6e, 0x53, 0xdb, 0x81, 0x0f, 0xc0, 0x34, 0xdf, 0x79,
- 0xd4, 0x94, 0x0e, 0xbc, 0x76, 0xa8, 0x7b, 0xe3, 0x21, 0xaa, 0x67, 0x59, 0xf6, 0x55, 0x04, 0x3b,
- 0xf2, 0x71, 0xe0, 0x0e, 0x98, 0xe5, 0x3f, 0xef, 0xda, 0x4f, 0x2d, 0xe9, 0xb7, 0x71, 0x40, 0xe7,
- 0x59, 0xd1, 0xaf, 0xf8, 0x00, 0x28, 0xc4, 0x52, 0xff, 0x9c, 0x06, 0xab, 0x03, 0xec, 0x59, 0xb7,
- 0x2d, 0x93, 0xb2, 0x18, 0x87, 0xf7, 0x62, 0x69, 0x7e, 0x33, 0x91, 0xe6, 0x97, 0x46, 0xf1, 0x47,
- 0xd2, 0x7e, 0x33, 0xb8, 0xa0, 0x54, 0x0c, 0x4b, 0xba, 0xf9, 0x7d, 0xa7, 0xd0, 0x67, 0xb0, 0xd2,
- 0x02, 0xa4, 0xf8, 0x65, 0xc0, 0x36, 0x80, 0x75, 0xec, 0x7a, 0x0f, 0x1d, 0x6c, 0xb9, 0x42, 0x12,
- 0x6d, 0x10, 0x79, 0xf5, 0xd7, 0x0e, 0x17, 0xb4, 0x8c, 0x43, 0x5f, 0x91, 0x5a, 0xc0, 0xcd, 0x1e,
- 0x34, 0xd4, 0x47, 0x02, 0xfc, 0x1e, 0x98, 0x72, 0x08, 0x76, 0x6d, 0x2b, 0x97, 0xe1, 0x56, 0x04,
- 0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x2a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b,
- 0xe4, 0x84, 0x41, 0x79, 0xdd, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0x3f, 0x57, 0xc0, 0x85, 0x01, 0x7e,
- 0xdc, 0xa4, 0xae, 0x07, 0x7f, 0xdb, 0x93, 0x95, 0xda, 0xe1, 0x0c, 0x64, 0xdc, 0x3c, 0x27, 0x83,
- 0x7a, 0xe0, 0xef, 0x44, 0x32, 0x72, 0x07, 0x4c, 0x52, 0x8f, 0x34, 0xfc, 0x3a, 0x53, 0x1a, 0x3f,
- 0x6d, 0xc2, 0x0a, 0xbe, 0xc1, 0x80, 0x90, 0xc0, 0x53, 0x5f, 0xa5, 0x07, 0x9a, 0xc5, 0xd2, 0x16,
- 0xb6, 0xc1, 0x02, 0x5f, 0xc9, 0x9e, 0x49, 0x76, 0xa5, 0x71, 0xc3, 0x8a, 0xc2, 0x90, 0x19, 0x45,
- 0x3f, 0x2b, 0xb5, 0x58, 0xa8, 0xc4, 0x50, 0x51, 0x42, 0x0a, 0xbc, 0x01, 0xb2, 0x0d, 0x6a, 0x21,
- 0xd2, 0xac, 0x53, 0x03, 0xbb, 0xb2, 0x09, 0x2d, 0x76, 0x3b, 0x85, 0xec, 0x56, 0xb8, 0x8d, 0xa2,
- 0x34, 0xf0, 0xc7, 0x20, 0xdb, 0xc0, 0xcf, 0x02, 0x16, 0xd1, 0x2c, 0x4e, 0x49, 0x79, 0xd9, 0xad,
- 0xf0, 0x08, 0x45, 0xe9, 0x60, 0x99, 0xc5, 0x00, 0x6b, 0xb3, 0x6e, 0x2e, 0xc3, 0x9d, 0xfb, 0xdd,
- 0x91, 0x0d, 0x99, 0x97, 0xb7, 0x48, 0xa8, 0x70, 0x6e, 0xe4, 0xc3, 0x40, 0x13, 0xcc, 0x54, 0x65,
- 0xa9, 0xe1, 0x61, 0x95, 0x2d, 0xfd, 0xe4, 0x03, 0xee, 0x4b, 0x22, 0xe8, 0x73, 0x2c, 0x24, 0xfc,
- 0x15, 0x0a, 0x90, 0xd5, 0x97, 0x19, 0x70, 0x71, 0x68, 0x89, 0x84, 0x3f, 0x07, 0xd0, 0xae, 0xba,
- 0xc4, 0x69, 0x13, 0xf3, 0x17, 0xe2, 0x91, 0xc0, 0x66, 0x3a, 0x76, 0x7f, 0x69, 0xfd, 0x2c, 0xcb,
- 0xa6, 0xed, 0x9e, 0x53, 0xd4, 0x87, 0x03, 0x1a, 0x60, 0x9e, 0xe5, 0x98, 0xb8, 0x31, 0x2a, 0xc7,
- 0xc7, 0xf1, 0x12, 0x78, 0x99, 0x4d, 0x03, 0x9b, 0x51, 0x10, 0x14, 0xc7, 0x84, 0x6b, 0x60, 0x51,
- 0x4e, 0x32, 0x89, 0x1b, 0x3c, 0x27, 0xfd, 0xbc, 0xb8, 0x1e, 0x3f, 0x46, 0x49, 0x7a, 0x06, 0x61,
- 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0xbb, 0xf1, 0x63, 0x94, 0xa4, 0x87, 0x35,
- 0xb0, 0x20, 0x51, 0xe5, 0xad, 0xe6, 0x26, 0x79, 0x4c, 0x8c, 0x1e, 0x32, 0x65, 0x5b, 0x0a, 0xe2,
- 0x7b, 0x3d, 0x06, 0x83, 0x12, 0xb0, 0xd0, 0x06, 0xc0, 0xf0, 0x8b, 0xa6, 0x9b, 0x9b, 0xe2, 0x42,
- 0xee, 0x8c, 0x1f, 0x25, 0x41, 0xe1, 0x0d, 0x3b, 0x7a, 0xb0, 0xe5, 0xa2, 0x88, 0x08, 0xf5, 0x6f,
- 0x0a, 0x58, 0x4a, 0x0e, 0xa9, 0xc1, 0x7b, 0x40, 0x19, 0xf8, 0x1e, 0xf8, 0x1d, 0x98, 0x11, 0x33,
- 0x8f, 0xed, 0xc8, 0x6b, 0xff, 0xd1, 0x21, 0xcb, 0x1a, 0xae, 0x92, 0x7a, 0x45, 0xb2, 0x8a, 0x20,
- 0xf6, 0x57, 0x28, 0x80, 0x54, 0x5f, 0x64, 0x00, 0x08, 0x73, 0x0a, 0xde, 0x8c, 0xf5, 0xb1, 0xd5,
- 0x44, 0x1f, 0x5b, 0x8a, 0x3e, 0x2e, 0x22, 0x3d, 0xeb, 0x01, 0x98, 0xb2, 0x79, 0x99, 0x91, 0x1a,
- 0x5e, 0x1f, 0xe2, 0xc7, 0x60, 0xde, 0x09, 0x80, 0x74, 0xc0, 0x1a, 0x83, 0xac, 0x53, 0x12, 0x08,
- 0x6e, 0x80, 0x4c, 0xd3, 0x36, 0xfd, 0x29, 0x65, 0xd8, 0x58, 0x57, 0xb6, 0x4d, 0x37, 0x06, 0x37,
- 0xc3, 0x34, 0x66, 0xbb, 0x88, 0x43, 0xb0, 0x29, 0xd1, 0xff, 0x94, 0xc0, 0xc3, 0x31, 0x5b, 0x2a,
- 0x0e, 0x81, 0xeb, 0xf7, 0x60, 0x17, 0xde, 0xf3, 0x4f, 0x50, 0x00, 0x07, 0xff, 0x08, 0x96, 0x8d,
- 0xe4, 0x03, 0x38, 0x37, 0x3d, 0x72, 0xb0, 0x1a, 0xfa, 0x75, 0x40, 0x3f, 0xd3, 0xed, 0x14, 0x96,
- 0x7b, 0x48, 0x50, 0xaf, 0x24, 0x66, 0x19, 0x91, 0xef, 0x26, 0x59, 0xe7, 0x86, 0x59, 0xd6, 0xef,
- 0x85, 0x28, 0x2c, 0xf3, 0x4f, 0x50, 0x00, 0xa7, 0xfe, 0x3d, 0x03, 0xe6, 0x62, 0x6f, 0xb1, 0x63,
- 0x8e, 0x0c, 0x91, 0xcc, 0x47, 0x16, 0x19, 0x02, 0xee, 0x48, 0x23, 0x43, 0x40, 0x1e, 0x53, 0x64,
- 0x08, 0x61, 0xc7, 0x14, 0x19, 0x11, 0xcb, 0xfa, 0x44, 0xc6, 0x27, 0x29, 0x3f, 0x32, 0xc4, 0xb0,
- 0x70, 0xb8, 0xc8, 0x10, 0xb4, 0x91, 0xc8, 0xd8, 0x8e, 0x3e, 0x6f, 0x47, 0xcc, 0x6a, 0x9a, 0xef,
- 0x56, 0xed, 0x41, 0x0b, 0x5b, 0x1e, 0xf5, 0x0e, 0xf4, 0xd9, 0x9e, 0xa7, 0xb0, 0x09, 0xe6, 0x70,
- 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8, 0x4b, 0x74, 0x2d, 0x82,
- 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x23, 0x2f, 0x78, 0xe2, 0xca, 0x2e, 0xc7, 0x5b, 0xfa,
- 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x35, 0x05, 0x96, 0x7b, 0x3e, 0x2e, 0x84, 0x4e, 0x51,
- 0x3e, 0x92, 0x53, 0x52, 0xc7, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x77, 0x0a, 0xc0, 0xde, 0xfe,
- 0x00, 0x0f, 0xf8, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x2d, 0x67, 0xe0, 0xe8, 0x38,
- 0x12, 0x85, 0x45, 0x49, 0x39, 0x47, 0xff, 0x91, 0x35, 0xfc, 0xa4, 0x95, 0x3e, 0xb2, 0x4f, 0x5a,
- 0xea, 0xff, 0x92, 0x7e, 0x3b, 0x81, 0x9f, 0xcf, 0xfa, 0xdd, 0x72, 0xfa, 0x78, 0x6e, 0x59, 0xfd,
- 0x8f, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x09, 0xf9, 0x76, 0xfa, 0xff, 0xb8, 0xea, 0x27, 0xf1, 0xbb,
- 0xe9, 0x4b, 0x05, 0x9c, 0x3e, 0x39, 0x7f, 0x93, 0xa8, 0xff, 0xea, 0x55, 0xf7, 0x04, 0xfc, 0xd9,
- 0xa1, 0xff, 0xf4, 0xf5, 0xbb, 0xfc, 0xc4, 0x9b, 0x77, 0xf9, 0x89, 0xb7, 0xef, 0xf2, 0x13, 0x7f,
- 0xea, 0xe6, 0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0xa6, 0x9b, 0x57, 0xde, 0x76, 0xf3, 0xca, 0x17, 0xdd,
- 0xbc, 0xf2, 0x97, 0x2f, 0xf3, 0x13, 0xbf, 0x39, 0x3f, 0xf0, 0x9f, 0xc2, 0x6f, 0x02, 0x00, 0x00,
- 0xff, 0xff, 0xca, 0x8b, 0x47, 0xba, 0x45, 0x1c, 0x00, 0x00,
+ // 1742 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xc9, 0x8f, 0x1b, 0x4b,
+ 0x19, 0x9f, 0xb6, 0x3d, 0x5b, 0x79, 0xd6, 0xca, 0xe6, 0x4c, 0x14, 0x7b, 0xd4, 0x04, 0xb2, 0x40,
+ 0xda, 0xc4, 0x84, 0x28, 0x22, 0x07, 0x34, 0x3d, 0x01, 0x32, 0xca, 0x0c, 0xe3, 0x94, 0x27, 0x19,
+ 0x76, 0xa5, 0xdc, 0x5d, 0xe3, 0x29, 0xc6, 0xee, 0xb6, 0xba, 0xdb, 0x4e, 0x26, 0x12, 0x12, 0x17,
+ 0xee, 0x08, 0x14, 0xf1, 0x4f, 0x44, 0x9c, 0x40, 0xe1, 0x00, 0x12, 0x12, 0x1c, 0x72, 0x41, 0xca,
+ 0x81, 0x43, 0x4e, 0x16, 0x31, 0xd2, 0x3b, 0xbe, 0xe3, 0x3b, 0xe4, 0xf4, 0x54, 0x4b, 0xaf, 0xde,
+ 0xc6, 0x79, 0x93, 0x91, 0xe6, 0xe6, 0xaa, 0xfa, 0xbe, 0xdf, 0xb7, 0xd4, 0xb7, 0x55, 0x1b, 0x5c,
+ 0x3f, 0xb8, 0xeb, 0x6a, 0xd4, 0x2e, 0xe2, 0x26, 0x2d, 0xe2, 0x96, 0x67, 0xbb, 0x06, 0xae, 0x53,
+ 0xab, 0x56, 0x6c, 0x97, 0x8a, 0x35, 0x62, 0x11, 0x07, 0x7b, 0xc4, 0xd4, 0x9a, 0x8e, 0xed, 0xd9,
+ 0xf0, 0xa2, 0x20, 0xd5, 0x70, 0x93, 0x6a, 0x11, 0x52, 0xad, 0x5d, 0x5a, 0xb9, 0x59, 0xa3, 0xde,
+ 0x7e, 0xab, 0xaa, 0x19, 0x76, 0xa3, 0x58, 0xb3, 0x6b, 0x76, 0x91, 0x73, 0x54, 0x5b, 0x7b, 0x7c,
+ 0xc5, 0x17, 0xfc, 0x97, 0x40, 0x5a, 0x51, 0x23, 0x42, 0x0d, 0xdb, 0x21, 0xc5, 0xf6, 0xad, 0xa4,
+ 0xb4, 0x95, 0xdb, 0x21, 0x4d, 0x03, 0x1b, 0xfb, 0xd4, 0x22, 0xce, 0x61, 0xb1, 0x79, 0x50, 0xe3,
+ 0x4c, 0x0e, 0x71, 0xed, 0x96, 0x63, 0x90, 0xb1, 0xb8, 0xdc, 0x62, 0x83, 0x78, 0xb8, 0x9f, 0xac,
+ 0xe2, 0x20, 0x2e, 0xa7, 0x65, 0x79, 0xb4, 0xd1, 0x2b, 0xe6, 0xce, 0x28, 0x06, 0xd7, 0xd8, 0x27,
+ 0x0d, 0x9c, 0xe4, 0x53, 0x3f, 0x53, 0xc0, 0xe5, 0x75, 0xdb, 0xf2, 0x30, 0xe3, 0x40, 0xd2, 0x88,
+ 0x2d, 0xe2, 0x39, 0xd4, 0xa8, 0xf0, 0xdf, 0x70, 0x1d, 0x64, 0x2c, 0xdc, 0x20, 0x39, 0x65, 0x55,
+ 0xb9, 0x36, 0xab, 0x17, 0xdf, 0x74, 0x0a, 0x13, 0xdd, 0x4e, 0x21, 0xf3, 0x63, 0xdc, 0x20, 0x1f,
+ 0x3a, 0x85, 0x42, 0xaf, 0xe3, 0x34, 0x1f, 0x86, 0x91, 0x20, 0xce, 0x0c, 0xb7, 0xc1, 0x94, 0x87,
+ 0x9d, 0x1a, 0xf1, 0x72, 0xa9, 0x55, 0xe5, 0x5a, 0xb6, 0x74, 0x55, 0x1b, 0x78, 0x75, 0x9a, 0x90,
+ 0xbe, 0xc3, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45, 0x30, 0x6b, 0xf8,
+ 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8, 0x9f, 0x0f, 0x31,
+ 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1e, 0x43, 0x77, 0xc1, 0xb4, 0xd1, 0x72, 0x1c, 0x62, 0xf9, 0x96,
+ 0x7e, 0x6b, 0xa4, 0xa5, 0x4f, 0x70, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94, 0x3a, 0xbd, 0x2e,
+ 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x15, 0x70, 0x69, 0xdd, 0xb1, 0x5d, 0xf7, 0x09, 0x71,
+ 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x5f, 0x13, 0xc3, 0x43, 0x64, 0x8f, 0x38, 0xc4, 0x32, 0x08, 0x5c,
+ 0x05, 0x99, 0x03, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0x87, 0xd4, 0x32, 0x11, 0x3f, 0x61,
+ 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5, 0x00, 0xa9, 0x15,
+ 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xbb, 0x02, 0xce, 0xfe, 0xe0,
+ 0xb9, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6, 0x2a, 0x65, 0x4b,
+ 0xdf, 0x1c, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa3, 0xc4, 0x09, 0xe3, 0x44, 0x9c, 0x20,
+ 0x09, 0x75, 0xec, 0x81, 0xa7, 0xfe, 0xbb, 0x57, 0x7d, 0x11, 0x3e, 0x9f, 0x44, 0xfd, 0x4f, 0x15,
+ 0x4e, 0xea, 0x9f, 0x15, 0xb0, 0xf4, 0xa0, 0xbc, 0x56, 0x11, 0xdc, 0x65, 0xbb, 0x4e, 0x8d, 0x43,
+ 0x78, 0x17, 0x64, 0xbc, 0xc3, 0xa6, 0x9f, 0x01, 0x57, 0xfc, 0x0b, 0xdf, 0x39, 0x6c, 0xb2, 0x0c,
+ 0x38, 0x9b, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xaf, 0x81, 0xc9, 0x36, 0x93, 0xcb, 0xb5, 0x9c,
+ 0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x0f, 0xcc, 0x37, 0x89, 0x43, 0x6d,
+ 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x39, 0x49, 0x3c, 0x5f, 0x8e, 0x1e,
+ 0xa2, 0x38, 0xad, 0xfa, 0x45, 0x0a, 0x2c, 0x86, 0x0a, 0xa0, 0x56, 0x9d, 0xb8, 0xf0, 0x57, 0x60,
+ 0xc5, 0xf5, 0x70, 0x95, 0xd6, 0xe9, 0x0b, 0xec, 0x51, 0xdb, 0xda, 0xa5, 0x96, 0x69, 0x3f, 0x8b,
+ 0xa3, 0xe7, 0xbb, 0x9d, 0xc2, 0x4a, 0x65, 0x20, 0x15, 0x1a, 0x82, 0x00, 0x1f, 0x82, 0x39, 0x97,
+ 0xd4, 0x89, 0xe1, 0x09, 0x7b, 0xa5, 0x5f, 0xae, 0x76, 0x3b, 0x85, 0xb9, 0x4a, 0x64, 0xff, 0x43,
+ 0xa7, 0x70, 0x26, 0xe6, 0x18, 0x71, 0x88, 0x62, 0xcc, 0xf0, 0xa7, 0x60, 0xa6, 0xc9, 0x7e, 0x51,
+ 0xe2, 0xe6, 0x52, 0xab, 0xe9, 0x11, 0x11, 0x92, 0xf4, 0xb5, 0xbe, 0x24, 0xbd, 0x34, 0x53, 0x96,
+ 0x20, 0x28, 0x80, 0x83, 0x3f, 0x07, 0xb3, 0x9e, 0x5d, 0x27, 0x0e, 0xb6, 0x0c, 0x92, 0xcb, 0xf0,
+ 0x38, 0xd1, 0x22, 0xd8, 0x41, 0x43, 0xd0, 0x9a, 0x07, 0x35, 0x2e, 0xcc, 0xef, 0x56, 0xda, 0xa3,
+ 0x16, 0xb6, 0x3c, 0xea, 0x1d, 0xea, 0xf3, 0xac, 0x8e, 0xec, 0xf8, 0x20, 0x28, 0xc4, 0x53, 0x5f,
+ 0xa7, 0xc0, 0x85, 0x07, 0xb6, 0x43, 0x5f, 0xb0, 0xca, 0x52, 0x2f, 0xdb, 0xe6, 0x9a, 0xd4, 0x94,
+ 0x38, 0xf0, 0x29, 0x98, 0x61, 0x1d, 0xcc, 0xc4, 0x1e, 0x96, 0x51, 0xff, 0xed, 0x61, 0x72, 0x5d,
+ 0x8d, 0x51, 0x6b, 0xed, 0x5b, 0x9a, 0x28, 0x46, 0x5b, 0xc4, 0xc3, 0x61, 0xbd, 0x08, 0xf7, 0x50,
+ 0x80, 0x0a, 0x7f, 0x02, 0x32, 0x6e, 0x93, 0x18, 0x32, 0xfa, 0xef, 0x0c, 0xf3, 0x58, 0x7f, 0x1d,
+ 0x2b, 0x4d, 0x62, 0x84, 0xb5, 0x8b, 0xad, 0x10, 0x47, 0x84, 0x4f, 0xc1, 0x94, 0xcb, 0xb3, 0x84,
+ 0x07, 0x4a, 0xb6, 0x74, 0xf7, 0x23, 0xb0, 0x45, 0x96, 0x05, 0xc9, 0x2b, 0xd6, 0x48, 0xe2, 0xaa,
+ 0xff, 0x51, 0x40, 0x61, 0x00, 0xa7, 0x4e, 0xf6, 0x71, 0x9b, 0xda, 0x0e, 0x7c, 0x04, 0xa6, 0xf9,
+ 0xce, 0xe3, 0xa6, 0x74, 0xe0, 0x8d, 0x23, 0x05, 0x05, 0x8f, 0x7f, 0x3d, 0xcb, 0x52, 0xbb, 0x22,
+ 0xd8, 0x91, 0x8f, 0x03, 0x77, 0xc1, 0x2c, 0xff, 0x79, 0xdf, 0x7e, 0x66, 0x49, 0xbf, 0x8d, 0x03,
+ 0xca, 0x23, 0xa1, 0xe2, 0x03, 0xa0, 0x10, 0x4b, 0xfd, 0x5d, 0x1a, 0xac, 0x0e, 0xb0, 0x67, 0xdd,
+ 0xb6, 0x4c, 0xca, 0x12, 0x08, 0x3e, 0x88, 0xd5, 0x90, 0xdb, 0x89, 0x1a, 0x72, 0x65, 0x14, 0x7f,
+ 0xa4, 0xa6, 0x6c, 0x06, 0x17, 0x94, 0x8a, 0x61, 0x49, 0x37, 0x7f, 0xe8, 0x14, 0xfa, 0x4c, 0x6d,
+ 0x5a, 0x80, 0x14, 0xbf, 0x0c, 0xd8, 0x06, 0xb0, 0x8e, 0x5d, 0x6f, 0xc7, 0xc1, 0x96, 0x2b, 0x24,
+ 0xd1, 0x06, 0x91, 0x57, 0x7f, 0xe3, 0x68, 0x41, 0xcb, 0x38, 0xf4, 0x15, 0xa9, 0x05, 0xdc, 0xec,
+ 0x41, 0x43, 0x7d, 0x24, 0xc0, 0x6f, 0x80, 0x29, 0x87, 0x60, 0xd7, 0xb6, 0x78, 0x62, 0xce, 0x86,
+ 0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x3a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b,
+ 0xe4, 0x84, 0x41, 0xed, 0xde, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0xff, 0xab, 0x80, 0x4b, 0x03, 0xfc,
+ 0xb8, 0x49, 0x5d, 0x0f, 0xfe, 0xa2, 0x27, 0x2b, 0xb5, 0xa3, 0x19, 0xc8, 0xb8, 0x79, 0x4e, 0x06,
+ 0xc5, 0xc6, 0xdf, 0x89, 0x64, 0xe4, 0x2e, 0x98, 0xa4, 0x1e, 0x69, 0xf8, 0x45, 0xac, 0x34, 0x7e,
+ 0xda, 0x84, 0xed, 0x61, 0x83, 0x01, 0x21, 0x81, 0xa7, 0xbe, 0x4e, 0x0f, 0x34, 0x8b, 0xa5, 0x2d,
+ 0x6c, 0x83, 0x05, 0xbe, 0x92, 0x0d, 0x99, 0xec, 0x49, 0xe3, 0x86, 0x15, 0x85, 0x21, 0x03, 0x90,
+ 0x7e, 0x5e, 0x6a, 0xb1, 0x50, 0x89, 0xa1, 0xa2, 0x84, 0x14, 0x78, 0x0b, 0x64, 0x1b, 0xd4, 0x42,
+ 0xa4, 0x59, 0xa7, 0x06, 0x76, 0x65, 0x87, 0x5b, 0xec, 0x76, 0x0a, 0xd9, 0xad, 0x70, 0x1b, 0x45,
+ 0x69, 0xe0, 0x77, 0x41, 0xb6, 0x81, 0x9f, 0x07, 0x2c, 0xa2, 0x13, 0x9d, 0x91, 0xf2, 0xb2, 0x5b,
+ 0xe1, 0x11, 0x8a, 0xd2, 0xc1, 0x32, 0x8b, 0x01, 0xd6, 0xc3, 0xdd, 0x5c, 0x86, 0x3b, 0xf7, 0xeb,
+ 0x23, 0xbb, 0x3d, 0x2f, 0x6f, 0x91, 0x50, 0xe1, 0xdc, 0xc8, 0x87, 0x81, 0x26, 0x98, 0xa9, 0xca,
+ 0x52, 0xc3, 0xc3, 0x2a, 0x5b, 0xfa, 0xde, 0x47, 0xdc, 0x97, 0x44, 0xd0, 0xe7, 0x58, 0x48, 0xf8,
+ 0x2b, 0x14, 0x20, 0xab, 0xaf, 0x32, 0xe0, 0xf2, 0xd0, 0x12, 0x09, 0x7f, 0x08, 0xa0, 0x5d, 0x75,
+ 0x89, 0xd3, 0x26, 0xe6, 0x8f, 0xc4, 0x0b, 0x84, 0x0d, 0x8c, 0xec, 0xfe, 0xd2, 0xfa, 0x79, 0x96,
+ 0x4d, 0xdb, 0x3d, 0xa7, 0xa8, 0x0f, 0x07, 0x34, 0xc0, 0x3c, 0xcb, 0x31, 0x71, 0x63, 0x54, 0xce,
+ 0xa6, 0xe3, 0x25, 0xf0, 0x32, 0x1b, 0x35, 0x36, 0xa3, 0x20, 0x28, 0x8e, 0x09, 0xd7, 0xc0, 0xa2,
+ 0x1c, 0x93, 0x12, 0x37, 0x78, 0x41, 0xfa, 0x79, 0x71, 0x3d, 0x7e, 0x8c, 0x92, 0xf4, 0x0c, 0xc2,
+ 0x24, 0x2e, 0x75, 0x88, 0x19, 0x40, 0x64, 0xe2, 0x10, 0xf7, 0xe3, 0xc7, 0x28, 0x49, 0x0f, 0x6b,
+ 0x60, 0x41, 0xa2, 0xca, 0x5b, 0xcd, 0x4d, 0xf2, 0x98, 0x18, 0x3d, 0xc1, 0xca, 0xb6, 0x14, 0xc4,
+ 0xf7, 0x7a, 0x0c, 0x06, 0x25, 0x60, 0xa1, 0x0d, 0x80, 0xe1, 0x17, 0x4d, 0x37, 0x37, 0xc5, 0x85,
+ 0xdc, 0x1b, 0x3f, 0x4a, 0x82, 0xc2, 0x1b, 0x76, 0xf4, 0x60, 0xcb, 0x45, 0x11, 0x11, 0xea, 0x1f,
+ 0x15, 0xb0, 0x94, 0x9c, 0x80, 0x83, 0xc7, 0x86, 0x32, 0xf0, 0xb1, 0xf1, 0x4b, 0x30, 0x23, 0x06,
+ 0x2a, 0xdb, 0x91, 0xd7, 0xfe, 0x9d, 0x23, 0x96, 0x35, 0x5c, 0x25, 0xf5, 0x8a, 0x64, 0x15, 0x41,
+ 0xec, 0xaf, 0x50, 0x00, 0xa9, 0xbe, 0xcc, 0x00, 0x10, 0xe6, 0x14, 0xbc, 0x1d, 0xeb, 0x63, 0xab,
+ 0x89, 0x3e, 0xb6, 0x14, 0x7d, 0xb9, 0x44, 0x7a, 0xd6, 0x23, 0x30, 0x65, 0xf3, 0x32, 0x23, 0x35,
+ 0xbc, 0x39, 0xc4, 0x8f, 0xc1, 0xbc, 0x13, 0x00, 0xe9, 0x80, 0x35, 0x06, 0x59, 0xa7, 0x24, 0x10,
+ 0xdc, 0x00, 0x99, 0xa6, 0x6d, 0xfa, 0x53, 0xca, 0xb0, 0x99, 0xb1, 0x6c, 0x9b, 0x6e, 0x0c, 0x6e,
+ 0x86, 0x69, 0xcc, 0x76, 0x11, 0x87, 0x60, 0x23, 0xa8, 0x3f, 0xf9, 0xc9, 0x31, 0xb1, 0x38, 0x04,
+ 0xae, 0xdf, 0xd7, 0x00, 0xe1, 0x3d, 0xff, 0x04, 0x05, 0x70, 0xf0, 0x37, 0x60, 0xd9, 0x48, 0xbe,
+ 0xae, 0x73, 0xd3, 0x23, 0x07, 0xab, 0xa1, 0x9f, 0x1e, 0xf4, 0x73, 0xdd, 0x4e, 0x61, 0xb9, 0x87,
+ 0x04, 0xf5, 0x4a, 0x62, 0x96, 0x11, 0xf9, 0x28, 0x93, 0x75, 0x6e, 0x98, 0x65, 0xfd, 0x9e, 0x9f,
+ 0xc2, 0x32, 0xff, 0x04, 0x05, 0x70, 0xea, 0x9f, 0x32, 0x60, 0x2e, 0xf6, 0xd0, 0x3b, 0xe1, 0xc8,
+ 0x10, 0xc9, 0x7c, 0x6c, 0x91, 0x21, 0xe0, 0x8e, 0x35, 0x32, 0x04, 0xe4, 0x09, 0x45, 0x86, 0x10,
+ 0x76, 0x42, 0x91, 0x11, 0xb1, 0xac, 0x4f, 0x64, 0xfc, 0x2b, 0xe5, 0x47, 0x86, 0x18, 0x16, 0x8e,
+ 0x16, 0x19, 0x82, 0x36, 0x12, 0x19, 0xdb, 0xd1, 0xb7, 0xf3, 0xf8, 0x2f, 0xb7, 0xd9, 0x9e, 0x77,
+ 0xb6, 0x09, 0xe6, 0x70, 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8,
+ 0x33, 0x77, 0x2d, 0x82, 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x63, 0x2f, 0x78, 0x3f, 0xcb,
+ 0x2e, 0xc7, 0x5b, 0xfa, 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x21, 0x05, 0x96, 0x7b, 0xbe,
+ 0x5c, 0x84, 0x4e, 0x51, 0x3e, 0x91, 0x53, 0x52, 0x27, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x6b,
+ 0x0a, 0xc0, 0xde, 0xfe, 0x00, 0x0f, 0xf9, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x15,
+ 0x67, 0xe0, 0xe8, 0x38, 0x12, 0x85, 0x45, 0x49, 0x39, 0xc7, 0xff, 0x05, 0x37, 0xfc, 0x5e, 0x96,
+ 0x3e, 0xb6, 0xef, 0x65, 0xea, 0x3f, 0x92, 0x7e, 0x3b, 0x85, 0xdf, 0xe6, 0xfa, 0xdd, 0x72, 0xfa,
+ 0x64, 0x6e, 0x59, 0xfd, 0x9b, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x29, 0xf9, 0x30, 0xfb, 0xcf, 0xb8,
+ 0xea, 0xa7, 0xf1, 0xa3, 0xec, 0x2b, 0x05, 0x9c, 0x3d, 0x3d, 0xff, 0xc1, 0xa8, 0x7f, 0xe9, 0x55,
+ 0xf7, 0x14, 0xfc, 0x93, 0xa2, 0x7f, 0xff, 0xcd, 0xfb, 0xfc, 0xc4, 0xdb, 0xf7, 0xf9, 0x89, 0x77,
+ 0xef, 0xf3, 0x13, 0xbf, 0xed, 0xe6, 0x95, 0x37, 0xdd, 0xbc, 0xf2, 0xb6, 0x9b, 0x57, 0xde, 0x75,
+ 0xf3, 0xca, 0xff, 0xba, 0x79, 0xe5, 0xf7, 0xff, 0xcf, 0x4f, 0xfc, 0xec, 0xe2, 0xc0, 0xbf, 0x21,
+ 0xbf, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xbe, 0x23, 0xae, 0x54, 0xa2, 0x1c, 0x00, 0x00,
}
func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) {
@@ -1126,6 +1127,18 @@ func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Tolerance != nil {
+ {
+ size, err := m.Tolerance.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
if m.StabilizationWindowSeconds != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds))
i--
@@ -2203,6 +2216,10 @@ func (m *HPAScalingRules) Size() (n int) {
if m.StabilizationWindowSeconds != nil {
n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds))
}
+ if m.Tolerance != nil {
+ l = m.Tolerance.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -2619,6 +2636,7 @@ func (this *HPAScalingRules) String() string {
`SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`,
`Policies:` + repeatedStringForPolicies + `,`,
`StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`,
+ `Tolerance:` + strings.Replace(fmt.Sprintf("%v", this.Tolerance), "Quantity", "resource.Quantity", 1) + `,`,
`}`,
}, "")
return s
@@ -3770,6 +3788,42 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error {
}
}
m.StabilizationWindowSeconds = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Tolerance == nil {
+ m.Tolerance = &resource.Quantity{}
+ }
+ if err := m.Tolerance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
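
For readers tracing the generated marshal/unmarshal code above: protobuf encodes each field as a key byte equal to (field number << 3) | wire type. The new tolerance field is field 4 with wire type 2 (length-delimited message), which is exactly the 0x22 written by MarshalToSizedBuffer and checked in the `case 4` branch of Unmarshal. A minimal standalone sketch of that arithmetic (illustrative only, not part of the vendored code):

    package main

    import "fmt"

    func main() {
        // Protobuf key byte: fieldNumber<<3 | wireType.
        // tolerance is field 4, wire type 2 (length-delimited).
        const fieldNumber, wireType = 4, 2
        fmt.Printf("0x%x\n", fieldNumber<<3|wireType) // prints 0x22
    }
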
diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto
index 4e6dc0592..04c34d6e1 100644
--- a/vendor/k8s.io/api/autoscaling/v2/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto
@@ -112,12 +112,18 @@ message HPAScalingPolicy {
optional int32 periodSeconds = 3;
}
-// HPAScalingRules configures the scaling behavior for one direction.
-// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
+// HPAScalingRules configures the scaling behavior for one direction via
+// scaling Policy Rules and a configurable metric tolerance.
+//
+// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
// They can limit the scaling velocity by specifying scaling policies.
// They can prevent flapping by specifying the stabilization window, so that the
// number of replicas is not set instantly, instead, the safest value from the stabilization
// window is chosen.
+//
+// The tolerance is applied to the metric values and prevents scaling too
+// eagerly for small metric variations. (Note that setting a tolerance requires
+// enabling the alpha HPAConfigurableTolerance feature gate.)
message HPAScalingRules {
// stabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down.
@@ -134,10 +140,28 @@ message HPAScalingRules {
optional string selectPolicy = 1;
// policies is a list of potential scaling polices which can be used during scaling.
- // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+ // If not set, use the default values:
+ // - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
+ // - For scale down: allow all pods to be removed in a 15s window.
// +listType=atomic
// +optional
repeated HPAScalingPolicy policies = 2;
+
+ // tolerance is the tolerance on the ratio between the current and desired
+ // metric value under which no updates are made to the desired number of
+ // replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
+ // set, the default cluster-wide tolerance is applied (by default 10%).
+ //
+ // For example, if autoscaling is configured with a memory consumption target of 100Mi,
+ // and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
+ // triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+ //
+ // This is an alpha field and requires enabling the HPAConfigurableTolerance
+ // feature gate.
+ //
+ // +featureGate=HPAConfigurableTolerance
+ // +optional
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity tolerance = 4;
}
// HorizontalPodAutoscaler is the configuration for a horizontal pod
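
The thresholds in the doc comment's example follow directly from the ratio definition: with a 100Mi target, a scale-down tolerance of 0.05 and a scale-up tolerance of 0.01, the desired replica count is left unchanged while usage stays inside [target*(1-0.05), target*(1+0.01)]. A minimal sketch of that arithmetic (illustrative only, not the HPA controller's implementation):

    package main

    import "fmt"

    func main() {
        const targetMi = 100.0
        const scaleDownTol, scaleUpTol = 0.05, 0.01
        // No scaling is triggered while actual consumption stays inside this band.
        fmt.Printf("[%.0fMi, %.0fMi]\n",
            targetMi*(1-scaleDownTol), // 95Mi: below this, scale down
            targetMi*(1+scaleUpTol))   // 101Mi: above this, scale up
    }
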
diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go
index 99e8db09d..9ce69b1ed 100644
--- a/vendor/k8s.io/api/autoscaling/v2/types.go
+++ b/vendor/k8s.io/api/autoscaling/v2/types.go
@@ -171,12 +171,18 @@ const (
DisabledPolicySelect ScalingPolicySelect = "Disabled"
)
-// HPAScalingRules configures the scaling behavior for one direction.
-// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
+// HPAScalingRules configures the scaling behavior for one direction via
+// scaling Policy Rules and a configurable metric tolerance.
+//
+// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
// They can limit the scaling velocity by specifying scaling policies.
// They can prevent flapping by specifying the stabilization window, so that the
// number of replicas is not set instantly, instead, the safest value from the stabilization
// window is chosen.
+//
+// The tolerance is applied to the metric values and prevents scaling too
+// eagerly for small metric variations. (Note that setting a tolerance requires
+// enabling the alpha HPAConfigurableTolerance feature gate.)
type HPAScalingRules struct {
// stabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down.
@@ -193,10 +199,28 @@ type HPAScalingRules struct {
SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"`
// policies is a list of potential scaling polices which can be used during scaling.
- // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+ // If not set, use the default values:
+ // - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
+ // - For scale down: allow all pods to be removed in a 15s window.
// +listType=atomic
// +optional
Policies []HPAScalingPolicy `json:"policies,omitempty" listType:"atomic" protobuf:"bytes,2,rep,name=policies"`
+
+ // tolerance is the tolerance on the ratio between the current and desired
+ // metric value under which no updates are made to the desired number of
+ // replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
+ // set, the default cluster-wide tolerance is applied (by default 10%).
+ //
+ // For example, if autoscaling is configured with a memory consumption target of 100Mi,
+ // and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
+ // triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+ //
+ // This is an alpha field and requires enabling the HPAConfigurableTolerance
+ // feature gate.
+ //
+ // +featureGate=HPAConfigurableTolerance
+ // +optional
+ Tolerance *resource.Quantity `json:"tolerance,omitempty" protobuf:"bytes,4,opt,name=tolerance"`
}
// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions.
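
From Go client code, the new optional field is populated like any other Quantity pointer. A minimal sketch, assuming a cluster with the alpha HPAConfigurableTolerance feature gate enabled (the 5% value is a hypothetical choice):

    package main

    import (
        "fmt"

        autoscalingv2 "k8s.io/api/autoscaling/v2"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // Per-direction 5% tolerance; must be >= 0 per the field's validation.
        tol := resource.MustParse("0.05")
        rules := autoscalingv2.HPAScalingRules{Tolerance: &tol}
        fmt.Println(rules.Tolerance.String()) // 50m (canonical form of 0.05)
    }
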
diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
index 649cd04a0..017fefcde 100644
--- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
@@ -92,10 +92,11 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string {
}
var map_HPAScalingRules = map[string]string{
- "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.",
+ "": "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
"stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
"selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.",
- "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid",
+ "policies": "policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
+ "tolerance": "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.",
}
func (HPAScalingRules) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go
index 125708d6f..5fbcf9f80 100644
--- a/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go
@@ -146,6 +146,11 @@ func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
*out = make([]HPAScalingPolicy, len(*in))
copy(*out, *in)
}
+ if in.Tolerance != nil {
+ in, out := &in.Tolerance, &out.Tolerance
+ x := (*in).DeepCopy()
+ *out = &x
+ }
return
}
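
The DeepCopyInto hunk above copies the Quantity by value — (*in).DeepCopy() returns a Quantity, not a pointer — so the copy never aliases the original's tolerance. A small sketch of the aliasing this avoids (hypothetical values):

    package main

    import (
        "fmt"

        autoscalingv2 "k8s.io/api/autoscaling/v2"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        tol := resource.MustParse("0.1")
        rules := autoscalingv2.HPAScalingRules{Tolerance: &tol}
        cp := rules.DeepCopy()
        cp.Tolerance.Add(resource.MustParse("0.05")) // mutates the copy only
        fmt.Println(rules.Tolerance.String(), cp.Tolerance.String()) // 100m 150m
    }
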
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
index 25ca507bb..eac92e86e 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v2beta1 // import "k8s.io/api/autoscaling/v2beta1"
+package v2beta1
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
index 76fb0aff8..150037297 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v2beta2 // import "k8s.io/api/autoscaling/v2beta2"
+package v2beta2
diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go
index cb5cbb600..69088e2c5 100644
--- a/vendor/k8s.io/api/batch/v1/doc.go
+++ b/vendor/k8s.io/api/batch/v1/doc.go
@@ -18,4 +18,4 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1 // import "k8s.io/api/batch/v1"
+package v1
diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
index 361ebdca1..d3aeae0ad 100644
--- a/vendor/k8s.io/api/batch/v1/generated.proto
+++ b/vendor/k8s.io/api/batch/v1/generated.proto
@@ -222,8 +222,6 @@ message JobSpec {
// When the field is specified, it must be immutable and works only for the Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
- // This field is beta-level. To use this field, you must enable the
- // `JobSuccessPolicy` feature gate (enabled by default).
// +optional
optional SuccessPolicy successPolicy = 16;
@@ -238,8 +236,6 @@ message JobSpec {
// batch.kubernetes.io/job-index-failure-count annotation. It can only
// be set when Job's completionMode=Indexed, and the Pod's restart
// policy is Never. The field is immutable.
- // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
- // feature gate is enabled (enabled by default).
// +optional
optional int32 backoffLimitPerIndex = 12;
@@ -251,8 +247,6 @@ message JobSpec {
// It can only be specified when backoffLimitPerIndex is set.
// It can be null or up to completions. It is required and must be
// less than or equal to 10^4 when is completions greater than 10^5.
- // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
- // feature gate is enabled (enabled by default).
// +optional
optional int32 maxFailedIndexes = 13;
@@ -442,8 +436,6 @@ message JobStatus {
// represented as "1,3-5,7".
// The set of failed indexes cannot overlap with the set of completed indexes.
//
- // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
- // feature gate is enabled (enabled by default).
// +optional
optional string failedIndexes = 10;
@@ -554,8 +546,6 @@ message PodFailurePolicyRule {
// running pods are terminated.
// - FailIndex: indicates that the pod's index is marked as Failed and will
// not be restarted.
- // This value is beta-level. It can be used when the
- // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
index 8e9a761b9..6c0007c21 100644
--- a/vendor/k8s.io/api/batch/v1/types.go
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -128,7 +128,6 @@ const (
// This is an action which might be taken on a pod failure - mark the
// Job's index as failed to avoid restarts within this index. This action
// can only be used when backoffLimitPerIndex is set.
- // This value is beta-level.
PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
// This is an action which might be taken on a pod failure - the counter towards
@@ -223,8 +222,6 @@ type PodFailurePolicyRule struct {
// running pods are terminated.
// - FailIndex: indicates that the pod's index is marked as Failed and will
// not be restarted.
- // This value is beta-level. It can be used when the
- // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the
@@ -346,8 +343,6 @@ type JobSpec struct {
// When the field is specified, it must be immutable and works only for the Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
- // This field is beta-level. To use this field, you must enable the
- // `JobSuccessPolicy` feature gate (enabled by default).
// +optional
SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
@@ -362,8 +357,6 @@ type JobSpec struct {
// batch.kubernetes.io/job-index-failure-count annotation. It can only
// be set when Job's completionMode=Indexed, and the Pod's restart
// policy is Never. The field is immutable.
- // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
- // feature gate is enabled (enabled by default).
// +optional
BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"`
@@ -375,8 +368,6 @@ type JobSpec struct {
// It can only be specified when backoffLimitPerIndex is set.
// It can be null or up to completions. It is required and must be
// less than or equal to 10^4 when is completions greater than 10^5.
- // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
- // feature gate is enabled (enabled by default).
// +optional
MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"`
@@ -571,8 +562,6 @@ type JobStatus struct {
// represented as "1,3-5,7".
// The set of failed indexes cannot overlap with the set of completed indexes.
//
- // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
- // feature gate is enabled (enabled by default).
// +optional
FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"`
@@ -647,13 +636,9 @@ const (
JobReasonFailedIndexes string = "FailedIndexes"
// JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to
// a Job met successPolicy.
- // https://kep.k8s.io/3998
- // This is currently a beta field.
JobReasonSuccessPolicy string = "SuccessPolicy"
// JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to
// a number of succeeded Job pods met completions.
- // - https://kep.k8s.io/3998
- // This is currently a beta field.
JobReasonCompletionsReached string = "CompletionsReached"
)
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index 893f3371f..ffd4e4f5f 100644
--- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -116,10 +116,10 @@ var map_JobSpec = map[string]string{
"completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
"podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.",
- "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).",
+ "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
- "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
- "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
+ "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
+ "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.",
"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
"manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
"template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
@@ -144,7 +144,7 @@ var map_JobStatus = map[string]string{
"failed": "The number of pods which reached phase Failed. The value increases monotonically.",
"terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).",
"completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
- "failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
+ "failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.",
"uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.",
"ready": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).",
}
@@ -195,7 +195,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string {
var map_PodFailurePolicyRule = map[string]string{
"": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.",
- "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
+ "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
"onExitCodes": "Represents the requirement on the container exit codes.",
"onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.",
}
diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go
index cb2572f5d..3430d6939 100644
--- a/vendor/k8s.io/api/batch/v1beta1/doc.go
+++ b/vendor/k8s.io/api/batch/v1beta1/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1beta1 // import "k8s.io/api/batch/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/certificates/v1/doc.go b/vendor/k8s.io/api/certificates/v1/doc.go
index 78434478e..6c16fc29b 100644
--- a/vendor/k8s.io/api/certificates/v1/doc.go
+++ b/vendor/k8s.io/api/certificates/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=certificates.k8s.io
-package v1 // import "k8s.io/api/certificates/v1"
+package v1
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/doc.go b/vendor/k8s.io/api/certificates/v1alpha1/doc.go
index d83d0e820..01481df8e 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=certificates.k8s.io
-package v1alpha1 // import "k8s.io/api/certificates/v1alpha1"
+package v1alpha1
diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go
index 1165518c6..81608a554 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/doc.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=certificates.k8s.io
-package v1beta1 // import "k8s.io/api/certificates/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
index b6d8ab3f5..199a54496 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
@@ -186,10 +186,94 @@ func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() {
var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo
+func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} }
+func (*ClusterTrustBundle) ProtoMessage() {}
+func (*ClusterTrustBundle) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6529c11a462c48a5, []int{5}
+}
+func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterTrustBundle.Merge(m, src)
+}
+func (m *ClusterTrustBundle) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterTrustBundle) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo
+
+func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} }
+func (*ClusterTrustBundleList) ProtoMessage() {}
+func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6529c11a462c48a5, []int{6}
+}
+func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterTrustBundleList.Merge(m, src)
+}
+func (m *ClusterTrustBundleList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterTrustBundleList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo
+
+func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} }
+func (*ClusterTrustBundleSpec) ProtoMessage() {}
+func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6529c11a462c48a5, []int{7}
+}
+func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src)
+}
+func (m *ClusterTrustBundleSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo
+
func (m *ExtraValue) Reset() { *m = ExtraValue{} }
func (*ExtraValue) ProtoMessage() {}
func (*ExtraValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{5}
+ return fileDescriptor_6529c11a462c48a5, []int{8}
}
func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -221,6 +305,9 @@ func init() {
proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec")
proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry")
proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus")
+ proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundle")
+ proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleList")
+ proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleSpec")
proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue")
}
@@ -229,64 +316,69 @@ func init() {
}
var fileDescriptor_6529c11a462c48a5 = []byte{
- // 901 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
- 0x18, 0xf6, 0xc6, 0x1f, 0xb1, 0xc7, 0x21, 0x6d, 0x47, 0x50, 0x2d, 0x96, 0xea, 0xb5, 0x56, 0x80,
- 0xc2, 0xd7, 0x2c, 0xa9, 0x2a, 0x88, 0x72, 0x40, 0xb0, 0x21, 0x42, 0x11, 0x29, 0x48, 0x93, 0x84,
- 0x03, 0x42, 0xa2, 0x93, 0xf5, 0xdb, 0xcd, 0x34, 0xdd, 0x0f, 0x76, 0x66, 0x4d, 0x7d, 0xeb, 0x4f,
- 0xe0, 0xc8, 0x91, 0xff, 0xc0, 0x9f, 0x08, 0x07, 0xa4, 0x1e, 0x7b, 0x40, 0x16, 0x71, 0xff, 0x45,
- 0x4e, 0x68, 0x66, 0xc7, 0x6b, 0xc7, 0x4e, 0x70, 0x69, 0x6f, 0x3b, 0xcf, 0xbc, 0xcf, 0xf3, 0xbc,
- 0xf3, 0xce, 0xfb, 0x8e, 0x8d, 0xbc, 0xd3, 0x2d, 0x41, 0x78, 0xe2, 0xb1, 0x94, 0x7b, 0x01, 0x64,
- 0x92, 0x3f, 0xe4, 0x01, 0x93, 0x20, 0xbc, 0xc1, 0xe6, 0x31, 0x48, 0xb6, 0xe9, 0x85, 0x10, 0x43,
- 0xc6, 0x24, 0xf4, 0x49, 0x9a, 0x25, 0x32, 0xc1, 0x4e, 0x41, 0x20, 0x2c, 0xe5, 0x64, 0x96, 0x40,
- 0x0c, 0xa1, 0xf3, 0x71, 0xc8, 0xe5, 0x49, 0x7e, 0x4c, 0x82, 0x24, 0xf2, 0xc2, 0x24, 0x4c, 0x3c,
- 0xcd, 0x3b, 0xce, 0x1f, 0xea, 0x95, 0x5e, 0xe8, 0xaf, 0x42, 0xaf, 0xe3, 0xce, 0x26, 0x90, 0x64,
- 0xe0, 0x0d, 0x16, 0x3c, 0x3b, 0xf7, 0xa6, 0x31, 0x11, 0x0b, 0x4e, 0x78, 0x0c, 0xd9, 0xd0, 0x4b,
- 0x4f, 0x43, 0x05, 0x08, 0x2f, 0x02, 0xc9, 0xae, 0x62, 0x79, 0xd7, 0xb1, 0xb2, 0x3c, 0x96, 0x3c,
- 0x82, 0x05, 0xc2, 0xa7, 0xcb, 0x08, 0x22, 0x38, 0x81, 0x88, 0xcd, 0xf3, 0xdc, 0x3f, 0x57, 0xd0,
- 0xdb, 0x3b, 0xd3, 0x52, 0x1c, 0xf0, 0x30, 0xe6, 0x71, 0x48, 0xe1, 0xe7, 0x1c, 0x84, 0xc4, 0x0f,
- 0x50, 0x53, 0x65, 0xd8, 0x67, 0x92, 0xd9, 0x56, 0xcf, 0xda, 0x68, 0xdf, 0xfd, 0x84, 0x4c, 0x6b,
- 0x58, 0x1a, 0x91, 0xf4, 0x34, 0x54, 0x80, 0x20, 0x2a, 0x9a, 0x0c, 0x36, 0xc9, 0x77, 0xc7, 0x8f,
- 0x20, 0x90, 0xf7, 0x41, 0x32, 0x1f, 0x9f, 0x8d, 0x9c, 0xca, 0x78, 0xe4, 0xa0, 0x29, 0x46, 0x4b,
- 0x55, 0xfc, 0x00, 0xd5, 0x44, 0x0a, 0x81, 0xbd, 0xa2, 0xd5, 0x3f, 0x27, 0x4b, 0x6e, 0x88, 0x5c,
- 0x9b, 0xeb, 0x41, 0x0a, 0x81, 0xbf, 0x66, 0xbc, 0x6a, 0x6a, 0x45, 0xb5, 0x32, 0x3e, 0x41, 0x0d,
- 0x21, 0x99, 0xcc, 0x85, 0x5d, 0xd5, 0x1e, 0x5f, 0xbc, 0x86, 0x87, 0xd6, 0xf1, 0xd7, 0x8d, 0x4b,
- 0xa3, 0x58, 0x53, 0xa3, 0xef, 0xbe, 0xa8, 0x22, 0xf7, 0x5a, 0xee, 0x4e, 0x12, 0xf7, 0xb9, 0xe4,
- 0x49, 0x8c, 0xb7, 0x50, 0x4d, 0x0e, 0x53, 0xd0, 0x05, 0x6d, 0xf9, 0xef, 0x4c, 0x52, 0x3e, 0x1c,
- 0xa6, 0x70, 0x31, 0x72, 0xde, 0x9c, 0x8f, 0x57, 0x38, 0xd5, 0x0c, 0xbc, 0x5f, 0x1e, 0xa5, 0xa1,
- 0xb9, 0xf7, 0x2e, 0x27, 0x72, 0x31, 0x72, 0xae, 0xe8, 0x48, 0x52, 0x2a, 0x5d, 0x4e, 0x17, 0xbf,
- 0x87, 0x1a, 0x19, 0x30, 0x91, 0xc4, 0xba, 0xf8, 0xad, 0xe9, 0xb1, 0xa8, 0x46, 0xa9, 0xd9, 0xc5,
- 0xef, 0xa3, 0xd5, 0x08, 0x84, 0x60, 0x21, 0xe8, 0x0a, 0xb6, 0xfc, 0x1b, 0x26, 0x70, 0xf5, 0x7e,
- 0x01, 0xd3, 0xc9, 0x3e, 0x7e, 0x84, 0xd6, 0x1f, 0x33, 0x21, 0x8f, 0xd2, 0x3e, 0x93, 0x70, 0xc8,
- 0x23, 0xb0, 0x6b, 0xba, 0xe6, 0x1f, 0xbc, 0x5c, 0xd7, 0x28, 0x86, 0x7f, 0xdb, 0xa8, 0xaf, 0xef,
- 0x5f, 0x52, 0xa2, 0x73, 0xca, 0x78, 0x80, 0xb0, 0x42, 0x0e, 0x33, 0x16, 0x8b, 0xa2, 0x50, 0xca,
- 0xaf, 0xfe, 0xbf, 0xfd, 0x3a, 0xc6, 0x0f, 0xef, 0x2f, 0xa8, 0xd1, 0x2b, 0x1c, 0xdc, 0x91, 0x85,
- 0xee, 0x5c, 0x7b, 0xcb, 0xfb, 0x5c, 0x48, 0xfc, 0xe3, 0xc2, 0xd4, 0x90, 0x97, 0xcb, 0x47, 0xb1,
- 0xf5, 0xcc, 0xdc, 0x34, 0x39, 0x35, 0x27, 0xc8, 0xcc, 0xc4, 0xfc, 0x84, 0xea, 0x5c, 0x42, 0x24,
- 0xec, 0x95, 0x5e, 0x75, 0xa3, 0x7d, 0x77, 0xfb, 0xd5, 0xdb, 0xd9, 0x7f, 0xc3, 0xd8, 0xd4, 0xf7,
- 0x94, 0x20, 0x2d, 0x74, 0xdd, 0x3f, 0x6a, 0xff, 0x71, 0x40, 0x35, 0x58, 0xf8, 0x5d, 0xb4, 0x9a,
- 0x15, 0x4b, 0x7d, 0xbe, 0x35, 0xbf, 0xad, 0xba, 0xc1, 0x44, 0xd0, 0xc9, 0x1e, 0x26, 0x08, 0x09,
- 0x1e, 0xc6, 0x90, 0x7d, 0xcb, 0x22, 0xb0, 0x57, 0x8b, 0x26, 0x53, 0x2f, 0xc1, 0x41, 0x89, 0xd2,
- 0x99, 0x08, 0xbc, 0x83, 0x6e, 0xc1, 0x93, 0x94, 0x67, 0x4c, 0x37, 0x2b, 0x04, 0x49, 0xdc, 0x17,
- 0x76, 0xb3, 0x67, 0x6d, 0xd4, 0xfd, 0xb7, 0xc6, 0x23, 0xe7, 0xd6, 0xee, 0xfc, 0x26, 0x5d, 0x8c,
- 0xc7, 0x04, 0x35, 0x72, 0xd5, 0x8b, 0xc2, 0xae, 0xf7, 0xaa, 0x1b, 0x2d, 0xff, 0xb6, 0xea, 0xe8,
- 0x23, 0x8d, 0x5c, 0x8c, 0x9c, 0xe6, 0x37, 0x30, 0xd4, 0x0b, 0x6a, 0xa2, 0xf0, 0x47, 0xa8, 0x99,
- 0x0b, 0xc8, 0x62, 0x95, 0x62, 0x31, 0x07, 0x65, 0xf1, 0x8f, 0x0c, 0x4e, 0xcb, 0x08, 0x7c, 0x07,
- 0x55, 0x73, 0xde, 0x37, 0x73, 0xd0, 0x36, 0x81, 0xd5, 0xa3, 0xbd, 0xaf, 0xa8, 0xc2, 0xb1, 0x8b,
- 0x1a, 0x61, 0x96, 0xe4, 0xa9, 0xb0, 0x6b, 0xda, 0x1c, 0x29, 0xf3, 0xaf, 0x35, 0x42, 0xcd, 0x0e,
- 0x8e, 0x51, 0x1d, 0x9e, 0xc8, 0x8c, 0xd9, 0x0d, 0x7d, 0x7f, 0x7b, 0xaf, 0xf7, 0xe4, 0x91, 0x5d,
- 0xa5, 0xb5, 0x1b, 0xcb, 0x6c, 0x38, 0xbd, 0x4e, 0x8d, 0xd1, 0xc2, 0xa6, 0x03, 0x08, 0x4d, 0x63,
- 0xf0, 0x4d, 0x54, 0x3d, 0x85, 0x61, 0xf1, 0xf6, 0x50, 0xf5, 0x89, 0xbf, 0x44, 0xf5, 0x01, 0x7b,
- 0x9c, 0x83, 0x79, 0x82, 0x3f, 0x5c, 0x9a, 0x8f, 0x56, 0xfb, 0x5e, 0x51, 0x68, 0xc1, 0xdc, 0x5e,
- 0xd9, 0xb2, 0xdc, 0xbf, 0x2c, 0xe4, 0x2c, 0x79, 0x38, 0xf1, 0x2f, 0x08, 0x05, 0x93, 0xc7, 0x48,
- 0xd8, 0x96, 0x3e, 0xff, 0xce, 0xab, 0x9f, 0xbf, 0x7c, 0xd8, 0xa6, 0xbf, 0x31, 0x25, 0x24, 0xe8,
- 0x8c, 0x15, 0xde, 0x44, 0xed, 0x19, 0x69, 0x7d, 0xd2, 0x35, 0xff, 0xc6, 0x78, 0xe4, 0xb4, 0x67,
- 0xc4, 0xe9, 0x6c, 0x8c, 0xfb, 0x99, 0x29, 0x9b, 0x3e, 0x28, 0x76, 0x26, 0x43, 0x67, 0xe9, 0x7b,
- 0x6d, 0xcd, 0x0f, 0xcd, 0x76, 0xf3, 0xb7, 0xdf, 0x9d, 0xca, 0xd3, 0xbf, 0x7b, 0x15, 0x7f, 0xf7,
- 0xec, 0xbc, 0x5b, 0x79, 0x76, 0xde, 0xad, 0x3c, 0x3f, 0xef, 0x56, 0x9e, 0x8e, 0xbb, 0xd6, 0xd9,
- 0xb8, 0x6b, 0x3d, 0x1b, 0x77, 0xad, 0xe7, 0xe3, 0xae, 0xf5, 0xcf, 0xb8, 0x6b, 0xfd, 0xfa, 0xa2,
- 0x5b, 0xf9, 0xc1, 0x59, 0xf2, 0xdf, 0xe5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x2f, 0x11,
- 0xe8, 0xdd, 0x08, 0x00, 0x00,
+ // 991 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44,
+ 0x14, 0x8f, 0x9b, 0x3f, 0x4d, 0x26, 0xa5, 0xbb, 0x3b, 0x40, 0x65, 0x22, 0x6d, 0x1c, 0x59, 0x80,
+ 0xca, 0x3f, 0x9b, 0x96, 0x85, 0xad, 0x7a, 0x40, 0xe0, 0x50, 0xa1, 0x8a, 0x2e, 0x48, 0xd3, 0x16,
+ 0x01, 0x42, 0x62, 0xa7, 0xce, 0x5b, 0xd7, 0xdb, 0xc6, 0x36, 0x9e, 0x71, 0xd8, 0xdc, 0x56, 0xe2,
+ 0x0b, 0x70, 0xe4, 0xc8, 0x77, 0xe0, 0x4b, 0x94, 0x03, 0x52, 0xb9, 0xed, 0x01, 0x45, 0x34, 0xfb,
+ 0x2d, 0x7a, 0x42, 0x33, 0x9e, 0x38, 0x4e, 0xd2, 0x90, 0xa5, 0x2b, 0xed, 0x2d, 0xf3, 0xe6, 0xfd,
+ 0x7e, 0xbf, 0xf7, 0x9e, 0xdf, 0x7b, 0x13, 0x64, 0x9f, 0x6c, 0x31, 0xcb, 0x0f, 0x6d, 0x1a, 0xf9,
+ 0xb6, 0x0b, 0x31, 0xf7, 0x1f, 0xf8, 0x2e, 0xe5, 0xc0, 0xec, 0xde, 0xc6, 0x11, 0x70, 0xba, 0x61,
+ 0x7b, 0x10, 0x40, 0x4c, 0x39, 0x74, 0xac, 0x28, 0x0e, 0x79, 0x88, 0x8d, 0x14, 0x60, 0xd1, 0xc8,
+ 0xb7, 0xf2, 0x00, 0x4b, 0x01, 0x1a, 0xef, 0x79, 0x3e, 0x3f, 0x4e, 0x8e, 0x2c, 0x37, 0xec, 0xda,
+ 0x5e, 0xe8, 0x85, 0xb6, 0xc4, 0x1d, 0x25, 0x0f, 0xe4, 0x49, 0x1e, 0xe4, 0xaf, 0x94, 0xaf, 0x61,
+ 0xe6, 0x03, 0x08, 0x63, 0xb0, 0x7b, 0x33, 0x9a, 0x8d, 0x3b, 0x63, 0x9f, 0x2e, 0x75, 0x8f, 0xfd,
+ 0x00, 0xe2, 0xbe, 0x1d, 0x9d, 0x78, 0xc2, 0xc0, 0xec, 0x2e, 0x70, 0x7a, 0x15, 0xca, 0x9e, 0x87,
+ 0x8a, 0x93, 0x80, 0xfb, 0x5d, 0x98, 0x01, 0x7c, 0xb4, 0x08, 0xc0, 0xdc, 0x63, 0xe8, 0xd2, 0x69,
+ 0x9c, 0xf9, 0xc7, 0x12, 0x7a, 0xad, 0x3d, 0x2e, 0xc5, 0xbe, 0xef, 0x05, 0x7e, 0xe0, 0x11, 0xf8,
+ 0x31, 0x01, 0xc6, 0xf1, 0x7d, 0x54, 0x15, 0x11, 0x76, 0x28, 0xa7, 0xba, 0xd6, 0xd2, 0xd6, 0xeb,
+ 0x9b, 0xef, 0x5b, 0xe3, 0x1a, 0x66, 0x42, 0x56, 0x74, 0xe2, 0x09, 0x03, 0xb3, 0x84, 0xb7, 0xd5,
+ 0xdb, 0xb0, 0xbe, 0x3a, 0x7a, 0x08, 0x2e, 0xbf, 0x07, 0x9c, 0x3a, 0xf8, 0x6c, 0x60, 0x14, 0x86,
+ 0x03, 0x03, 0x8d, 0x6d, 0x24, 0x63, 0xc5, 0xf7, 0x51, 0x89, 0x45, 0xe0, 0xea, 0x4b, 0x92, 0xfd,
+ 0x63, 0x6b, 0xc1, 0x17, 0xb2, 0xe6, 0xc6, 0xba, 0x1f, 0x81, 0xeb, 0xac, 0x28, 0xad, 0x92, 0x38,
+ 0x11, 0xc9, 0x8c, 0x8f, 0x51, 0x85, 0x71, 0xca, 0x13, 0xa6, 0x17, 0xa5, 0xc6, 0x27, 0xcf, 0xa1,
+ 0x21, 0x79, 0x9c, 0x55, 0xa5, 0x52, 0x49, 0xcf, 0x44, 0xf1, 0x9b, 0x4f, 0x8b, 0xc8, 0x9c, 0x8b,
+ 0x6d, 0x87, 0x41, 0xc7, 0xe7, 0x7e, 0x18, 0xe0, 0x2d, 0x54, 0xe2, 0xfd, 0x08, 0x64, 0x41, 0x6b,
+ 0xce, 0xeb, 0xa3, 0x90, 0x0f, 0xfa, 0x11, 0x5c, 0x0e, 0x8c, 0x57, 0xa6, 0xfd, 0x85, 0x9d, 0x48,
+ 0x04, 0xde, 0xcb, 0x52, 0xa9, 0x48, 0xec, 0x9d, 0xc9, 0x40, 0x2e, 0x07, 0xc6, 0x15, 0x1d, 0x69,
+ 0x65, 0x4c, 0x93, 0xe1, 0xe2, 0x37, 0x51, 0x25, 0x06, 0xca, 0xc2, 0x40, 0x16, 0xbf, 0x36, 0x4e,
+ 0x8b, 0x48, 0x2b, 0x51, 0xb7, 0xf8, 0x2d, 0xb4, 0xdc, 0x05, 0xc6, 0xa8, 0x07, 0xb2, 0x82, 0x35,
+ 0xe7, 0x86, 0x72, 0x5c, 0xbe, 0x97, 0x9a, 0xc9, 0xe8, 0x1e, 0x3f, 0x44, 0xab, 0xa7, 0x94, 0xf1,
+ 0xc3, 0xa8, 0x43, 0x39, 0x1c, 0xf8, 0x5d, 0xd0, 0x4b, 0xb2, 0xe6, 0x6f, 0x3f, 0x5b, 0xd7, 0x08,
+ 0x84, 0xb3, 0xa6, 0xd8, 0x57, 0xf7, 0x26, 0x98, 0xc8, 0x14, 0x33, 0xee, 0x21, 0x2c, 0x2c, 0x07,
+ 0x31, 0x0d, 0x58, 0x5a, 0x28, 0xa1, 0x57, 0xfe, 0xdf, 0x7a, 0x0d, 0xa5, 0x87, 0xf7, 0x66, 0xd8,
+ 0xc8, 0x15, 0x0a, 0xe6, 0x40, 0x43, 0xb7, 0xe7, 0x7e, 0xe5, 0x3d, 0x9f, 0x71, 0xfc, 0xfd, 0xcc,
+ 0xd4, 0x58, 0xcf, 0x16, 0x8f, 0x40, 0xcb, 0x99, 0xb9, 0xa9, 0x62, 0xaa, 0x8e, 0x2c, 0xb9, 0x89,
+ 0xf9, 0x01, 0x95, 0x7d, 0x0e, 0x5d, 0xa6, 0x2f, 0xb5, 0x8a, 0xeb, 0xf5, 0xcd, 0xed, 0xeb, 0xb7,
+ 0xb3, 0xf3, 0x92, 0x92, 0x29, 0xef, 0x0a, 0x42, 0x92, 0xf2, 0x9a, 0xbf, 0x97, 0xfe, 0x23, 0x41,
+ 0x31, 0x58, 0xf8, 0x0d, 0xb4, 0x1c, 0xa7, 0x47, 0x99, 0xdf, 0x8a, 0x53, 0x17, 0xdd, 0xa0, 0x3c,
+ 0xc8, 0xe8, 0x0e, 0x5b, 0x08, 0x31, 0xdf, 0x0b, 0x20, 0xfe, 0x92, 0x76, 0x41, 0x5f, 0x4e, 0x9b,
+ 0x4c, 0x6c, 0x82, 0xfd, 0xcc, 0x4a, 0x72, 0x1e, 0xb8, 0x8d, 0x6e, 0xc1, 0xa3, 0xc8, 0x8f, 0xa9,
+ 0x6c, 0x56, 0x70, 0xc3, 0xa0, 0xc3, 0xf4, 0x6a, 0x4b, 0x5b, 0x2f, 0x3b, 0xaf, 0x0e, 0x07, 0xc6,
+ 0xad, 0x9d, 0xe9, 0x4b, 0x32, 0xeb, 0x8f, 0x2d, 0x54, 0x49, 0x44, 0x2f, 0x32, 0xbd, 0xdc, 0x2a,
+ 0xae, 0xd7, 0x9c, 0x35, 0xd1, 0xd1, 0x87, 0xd2, 0x72, 0x39, 0x30, 0xaa, 0x5f, 0x40, 0x5f, 0x1e,
+ 0x88, 0xf2, 0xc2, 0xef, 0xa2, 0x6a, 0xc2, 0x20, 0x0e, 0x44, 0x88, 0xe9, 0x1c, 0x64, 0xc5, 0x3f,
+ 0x54, 0x76, 0x92, 0x79, 0xe0, 0xdb, 0xa8, 0x98, 0xf8, 0x1d, 0x35, 0x07, 0x75, 0xe5, 0x58, 0x3c,
+ 0xdc, 0xfd, 0x8c, 0x08, 0x3b, 0x36, 0x51, 0xc5, 0x8b, 0xc3, 0x24, 0x62, 0x7a, 0x49, 0x8a, 0x23,
+ 0x21, 0xfe, 0xb9, 0xb4, 0x10, 0x75, 0x83, 0x03, 0x54, 0x86, 0x47, 0x3c, 0xa6, 0x7a, 0x45, 0x7e,
+ 0xbf, 0xdd, 0xe7, 0x5b, 0x79, 0xd6, 0x8e, 0xe0, 0xda, 0x09, 0x78, 0xdc, 0x1f, 0x7f, 0x4e, 0x69,
+ 0x23, 0xa9, 0x4c, 0x03, 0x10, 0x1a, 0xfb, 0xe0, 0x9b, 0xa8, 0x78, 0x02, 0xfd, 0x74, 0xf7, 0x10,
+ 0xf1, 0x13, 0x7f, 0x8a, 0xca, 0x3d, 0x7a, 0x9a, 0x80, 0x5a, 0xc1, 0xef, 0x2c, 0x8c, 0x47, 0xb2,
+ 0x7d, 0x2d, 0x20, 0x24, 0x45, 0x6e, 0x2f, 0x6d, 0x69, 0xe6, 0x9f, 0x1a, 0x32, 0x16, 0x2c, 0x4e,
+ 0xfc, 0x13, 0x42, 0xee, 0x68, 0x19, 0x31, 0x5d, 0x93, 0xf9, 0xb7, 0xaf, 0x9f, 0x7f, 0xb6, 0xd8,
+ 0xc6, 0x6f, 0x4c, 0x66, 0x62, 0x24, 0x27, 0x85, 0x37, 0x50, 0x3d, 0x47, 0x2d, 0x33, 0x5d, 0x71,
+ 0x6e, 0x0c, 0x07, 0x46, 0x3d, 0x47, 0x4e, 0xf2, 0x3e, 0xe6, 0x5f, 0x1a, 0xc2, 0xed, 0xd3, 0x84,
+ 0x71, 0x88, 0x0f, 0xe2, 0x84, 0x71, 0x27, 0x09, 0x3a, 0xa7, 0xf0, 0x02, 0x5e, 0xc4, 0x6f, 0x27,
+ 0x5e, 0xc4, 0xbb, 0x8b, 0xcb, 0x33, 0x13, 0xe4, 0xbc, 0xa7, 0xd0, 0x3c, 0xd7, 0xd0, 0xda, 0xac,
+ 0xfb, 0x0b, 0xd8, 0x59, 0xdf, 0x4c, 0xee, 0xac, 0x0f, 0xae, 0x91, 0xd4, 0x9c, 0x65, 0xf5, 0xf3,
+ 0x95, 0x29, 0xc9, 0x2d, 0xb5, 0x39, 0xb1, 0x7e, 0xd2, 0xd7, 0x36, 0x2b, 0xfd, 0x9c, 0x15, 0xf4,
+ 0x21, 0xaa, 0xf3, 0x31, 0x8d, 0x5a, 0x08, 0x2f, 0x2b, 0x50, 0x3d, 0xa7, 0x40, 0xf2, 0x7e, 0xe6,
+ 0x5d, 0x35, 0x63, 0x72, 0x2a, 0xb0, 0x31, 0xca, 0x56, 0x93, 0x4b, 0xa0, 0x36, 0x1d, 0xf4, 0x76,
+ 0xf5, 0xd7, 0xdf, 0x8c, 0xc2, 0xe3, 0xbf, 0x5b, 0x05, 0x67, 0xe7, 0xec, 0xa2, 0x59, 0x38, 0xbf,
+ 0x68, 0x16, 0x9e, 0x5c, 0x34, 0x0b, 0x8f, 0x87, 0x4d, 0xed, 0x6c, 0xd8, 0xd4, 0xce, 0x87, 0x4d,
+ 0xed, 0xc9, 0xb0, 0xa9, 0xfd, 0x33, 0x6c, 0x6a, 0xbf, 0x3c, 0x6d, 0x16, 0xbe, 0x33, 0x16, 0xfc,
+ 0xd1, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x17, 0xbe, 0xe3, 0x02, 0x0a, 0x0b, 0x00, 0x00,
}
func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) {
@@ -595,6 +687,129 @@ func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int
return len(dAtA) - i, nil
}
+func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterTrustBundleList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.TrustBundle)
+ copy(dAtA[i:], m.TrustBundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.SignerName)
+ copy(dAtA[i:], m.SignerName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m ExtraValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -755,6 +970,49 @@ func (m *CertificateSigningRequestStatus) Size() (n int) {
return n
}
+func (m *ClusterTrustBundle) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ClusterTrustBundleList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterTrustBundleSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.TrustBundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m ExtraValue) Size() (n int) {
if m == nil {
return 0
@@ -862,6 +1120,44 @@ func (this *CertificateSigningRequestStatus) String() string {
}, "")
return s
}
+func (this *ClusterTrustBundle) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterTrustBundle{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterTrustBundleList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ClusterTrustBundle{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ClusterTrustBundleList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterTrustBundleSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterTrustBundleSpec{`,
+ `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+ `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -1892,6 +2188,353 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ClusterTrustBundle{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SignerName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TrustBundle = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ExtraValue) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
index f3ec4c06e..7c48270f6 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -190,6 +190,79 @@ message CertificateSigningRequestStatus {
optional bytes certificate = 2;
}
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
+// (root certificates).
+//
+// ClusterTrustBundle objects are considered to be readable by any authenticated
+// user in the cluster, because they can be mounted by pods using the
+// `clusterTrustBundle` projection. All service accounts have read access to
+// ClusterTrustBundles by default. Users who only have namespace-level access
+// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
+// that they have access to.
+//
+// It can optionally be associated with a particular signer, in which case it
+// contains one valid set of trust anchors for that signer. Signers may have
+// multiple associated ClusterTrustBundles; each is an independent set of trust
+// anchors for that signer. Admission control is used to enforce that only users
+// with permissions on the signer can create or modify the corresponding bundle.
+message ClusterTrustBundle {
+ // metadata contains the object metadata.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec contains the signer (if any) and trust anchors.
+ optional ClusterTrustBundleSpec spec = 2;
+}
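
(Editorial aside, not part of the diff.) The `clusterTrustBundle` projection mentioned in the comment above is the pod-side consumer of this API. As a minimal sketch, assuming the `ClusterTrustBundleProjection` volume source in `k8s.io/api/core/v1`; the signer name and path below are placeholders:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
)

// trustBundleVolume builds a projected volume that asks the kubelet to
// assemble the trust anchors published for one signer into a single PEM file.
func trustBundleVolume() corev1.Volume {
	signerName := "example.com/foo" // hypothetical signer
	return corev1.Volume{
		Name: "trust-anchors",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					ClusterTrustBundle: &corev1.ClusterTrustBundleProjection{
						SignerName: &signerName,
						// Path of the merged PEM file within the volume.
						Path: "ca-certificates.pem",
					},
				}},
			},
		},
	}
}
```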
+
+// ClusterTrustBundleList is a collection of ClusterTrustBundle objects
+message ClusterTrustBundleList {
+ // metadata contains the list metadata.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a collection of ClusterTrustBundle objects
+ repeated ClusterTrustBundle items = 2;
+}
+
+// ClusterTrustBundleSpec contains the signer and trust anchors.
+message ClusterTrustBundleSpec {
+ // signerName indicates the associated signer, if any.
+ //
+ // In order to create or update a ClusterTrustBundle that sets signerName,
+ // you must have the following cluster-scoped permission:
+// group=certificates.k8s.io resource=signers resourceName=<the signer name>
+// verb=attest.
+ //
+ // If signerName is not empty, then the ClusterTrustBundle object must be
+ // named with the signer name as a prefix (translating slashes to colons).
+ // For example, for the signer name `example.com/foo`, valid
+ // ClusterTrustBundle object names include `example.com:foo:abc` and
+ // `example.com:foo:v1`.
+ //
+ // If signerName is empty, then the ClusterTrustBundle object's name must
+ // not have such a prefix.
+ //
+ // List/watch requests for ClusterTrustBundles can filter on this field
+ // using a `spec.signerName=NAME` field selector.
+ //
+ // +optional
+ optional string signerName = 1;
+
+ // trustBundle contains the individual X.509 trust anchors for this
+// bundle, as a PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+ //
+ // The data must consist only of PEM certificate blocks that parse as valid
+ // X.509 certificates. Each certificate must include a basic constraints
+ // extension with the CA bit set. The API server will reject objects that
+ // contain duplicate certificates, or that use PEM block headers.
+ //
+ // Users of ClusterTrustBundles, including Kubelet, are free to reorder and
+ // deduplicate certificate blocks in this file according to their own logic,
+ // as well as to drop PEM block headers and inter-block data.
+ optional string trustBundle = 2;
+}
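
(Editorial aside, not part of the diff.) The trustBundle rules above — header-free PEM `CERTIFICATE` blocks, each parsing as an X.509 certificate with the CA basic constraint set, and no duplicates — map onto a short standard-library loop. A rough sketch only; the apiserver's actual validation lives elsewhere and may differ in detail:

```go
package example

import (
	"bytes"
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// validateTrustBundle applies the documented trustBundle constraints to a
// candidate PEM payload.
func validateTrustBundle(data []byte) error {
	seen := map[[32]byte]bool{}
	rest := bytes.TrimSpace(data)
	for len(rest) > 0 {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			return fmt.Errorf("data contains non-PEM content")
		}
		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
			return fmt.Errorf("only header-free CERTIFICATE blocks are allowed")
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return fmt.Errorf("invalid X.509 certificate: %w", err)
		}
		if !cert.BasicConstraintsValid || !cert.IsCA {
			return fmt.Errorf("certificate must carry a basic constraints extension with the CA bit set")
		}
		sum := sha256.Sum256(block.Bytes)
		if seen[sum] {
			return fmt.Errorf("duplicate certificate in bundle")
		}
		seen[sum] = true
		rest = bytes.TrimSpace(rest)
	}
	return nil
}
```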
+
// ExtraValue masks the value so protobuf can generate
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go
index b4f3af9b9..800dccd07 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/register.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/register.go
@@ -51,6 +51,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&CertificateSigningRequest{},
&CertificateSigningRequestList{},
+ &ClusterTrustBundle{},
+ &ClusterTrustBundleList{},
)
// Add the watch version that applies
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go
index 7e5a5c198..1ce104807 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -262,3 +262,88 @@ const (
UsageMicrosoftSGC KeyUsage = "microsoft sgc"
UsageNetscapeSGC KeyUsage = "netscape sgc"
)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
+// (root certificates).
+//
+// ClusterTrustBundle objects are considered to be readable by any authenticated
+// user in the cluster, because they can be mounted by pods using the
+// `clusterTrustBundle` projection. All service accounts have read access to
+// ClusterTrustBundles by default. Users who only have namespace-level access
+// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
+// that they have access to.
+//
+// It can optionally be associated with a particular signer, in which case it
+// contains one valid set of trust anchors for that signer. Signers may have
+// multiple associated ClusterTrustBundles; each is an independent set of trust
+// anchors for that signer. Admission control is used to enforce that only users
+// with permissions on the signer can create or modify the corresponding bundle.
+type ClusterTrustBundle struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata contains the object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec contains the signer (if any) and trust anchors.
+ Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// ClusterTrustBundleSpec contains the signer and trust anchors.
+type ClusterTrustBundleSpec struct {
+ // signerName indicates the associated signer, if any.
+ //
+ // In order to create or update a ClusterTrustBundle that sets signerName,
+ // you must have the following cluster-scoped permission:
+ // group=certificates.k8s.io resource=signers resourceName=<the signer name>
+ // verb=attest.
+ //
+ // If signerName is not empty, then the ClusterTrustBundle object must be
+ // named with the signer name as a prefix (translating slashes to colons).
+ // For example, for the signer name `example.com/foo`, valid
+ // ClusterTrustBundle object names include `example.com:foo:abc` and
+ // `example.com:foo:v1`.
+ //
+ // If signerName is empty, then the ClusterTrustBundle object's name must
+ // not have such a prefix.
+ //
+ // List/watch requests for ClusterTrustBundles can filter on this field
+ // using a `spec.signerName=NAME` field selector.
+ //
+ // +optional
+ SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"`
+
+ // trustBundle contains the individual X.509 trust anchors for this
+ // bundle, as a PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+ //
+ // The data must consist only of PEM certificate blocks that parse as valid
+ // X.509 certificates. Each certificate must include a basic constraints
+ // extension with the CA bit set. The API server will reject objects that
+ // contain duplicate certificates, or that use PEM block headers.
+ //
+ // Users of ClusterTrustBundles, including Kubelet, are free to reorder and
+ // deduplicate certificate blocks in this file according to their own logic,
+ // as well as to drop PEM block headers and inter-block data.
+ TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"`
+}
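
(Editorial aside, not part of the diff.) A brief sketch of constructing an object with the types added in this hunk, following the naming rule documented for signerName: slashes in the signer name become colons, and the result prefixes the object name. The signer, suffix, and PEM payload are placeholders:

```go
package example

import (
	"strings"

	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newBundle builds a ClusterTrustBundle whose name satisfies the documented
// signer-name prefix convention, e.g. "example.com/foo" -> "example.com:foo:v1".
func newBundle(signerName, suffix, pemData string) certificatesv1beta1.ClusterTrustBundle {
	name := strings.ReplaceAll(signerName, "/", ":") + ":" + suffix
	return certificatesv1beta1.ClusterTrustBundle{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: certificatesv1beta1.ClusterTrustBundleSpec{
			SignerName:  signerName,
			TrustBundle: pemData,
		},
	}
}
```

Consumers that care about a single signer can then narrow list/watch calls with the documented field selector, e.g. `metav1.ListOptions{FieldSelector: "spec.signerName=example.com/foo"}`.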
+
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterTrustBundleList is a collection of ClusterTrustBundle objects
+type ClusterTrustBundleList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata contains the list metadata.
+ //
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a collection of ClusterTrustBundle objects
+ Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
index f9ab1f13d..58c69e54d 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
@@ -75,4 +75,34 @@ func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string {
return map_CertificateSigningRequestStatus
}
+var map_ClusterTrustBundle = map[string]string{
+ "": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.",
+ "metadata": "metadata contains the object metadata.",
+ "spec": "spec contains the signer (if any) and trust anchors.",
+}
+
+func (ClusterTrustBundle) SwaggerDoc() map[string]string {
+ return map_ClusterTrustBundle
+}
+
+var map_ClusterTrustBundleList = map[string]string{
+ "": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects",
+ "metadata": "metadata contains the list metadata.",
+ "items": "items is a collection of ClusterTrustBundle objects",
+}
+
+func (ClusterTrustBundleList) SwaggerDoc() map[string]string {
+ return map_ClusterTrustBundleList
+}
+
+var map_ClusterTrustBundleSpec = map[string]string{
+ "": "ClusterTrustBundleSpec contains the signer and trust anchors.",
+ "signerName": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName= verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.",
+ "trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.",
+}
+
+func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
+ return map_ClusterTrustBundleSpec
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
index a315e2ac6..854e83473 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
@@ -188,6 +188,82 @@ func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequest
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle.
+func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterTrustBundle)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterTrustBundle, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList.
+func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterTrustBundleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec.
+func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterTrustBundleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
index 480a32936..062b46f16 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -72,3 +72,39 @@ func (in *CertificateSigningRequestList) APILifecycleReplacement() schema.GroupV
func (in *CertificateSigningRequestList) APILifecycleRemoved() (major, minor int) {
return 1, 22
}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) {
+ return 1, 36
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) {
+ return 1, 39
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 36
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
+ return 1, 39
+}
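
(Editorial aside, not part of the diff.) The generated lifecycle accessors above encode the introduced/deprecated/removed releases (1.33/1.36/1.39, following the usual introduced-plus-three-minor cadence). A small sketch of how a caller might consume them; the version-comparison wrapper itself is hypothetical:

```go
package example

import (
	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
)

// servedAt reports whether certificates.k8s.io/v1beta1 ClusterTrustBundle is
// expected to still be served at the given cluster version, based on the
// generated APILifecycleRemoved release.
func servedAt(clusterMajor, clusterMinor int) bool {
	removedMajor, removedMinor := (&certificatesv1beta1.ClusterTrustBundle{}).APILifecycleRemoved()
	if clusterMajor != removedMajor {
		return clusterMajor < removedMajor
	}
	return clusterMinor < removedMinor
}
```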
diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go
index 9b2fbbda3..82ae6340c 100644
--- a/vendor/k8s.io/api/coordination/v1/doc.go
+++ b/vendor/k8s.io/api/coordination/v1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=coordination.k8s.io
-package v1 // import "k8s.io/api/coordination/v1"
+package v1
diff --git a/vendor/k8s.io/api/coordination/v1alpha2/doc.go b/vendor/k8s.io/api/coordination/v1alpha2/doc.go
index 5e6d65530..dff7df47f 100644
--- a/vendor/k8s.io/api/coordination/v1alpha2/doc.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=coordination.k8s.io
-package v1alpha2 // import "k8s.io/api/coordination/v1alpha2"
+package v1alpha2
diff --git a/vendor/k8s.io/api/coordination/v1alpha2/generated.proto b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
index 7e56cd7f9..250c6113e 100644
--- a/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
+++ b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
@@ -92,8 +92,6 @@ message LeaseCandidateSpec {
// If multiple candidates for the same Lease return different strategies, the strategy provided
// by the candidate with the latest BinaryVersion will be used. If there is still conflict,
// this is a user error and coordinated leader election will not operate the Lease until resolved.
- // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
- // +featureGate=CoordinatedLeaderElection
// +required
optional string strategy = 6;
}
diff --git a/vendor/k8s.io/api/coordination/v1alpha2/types.go b/vendor/k8s.io/api/coordination/v1alpha2/types.go
index 2f53b097a..13e1deb06 100644
--- a/vendor/k8s.io/api/coordination/v1alpha2/types.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/types.go
@@ -73,8 +73,6 @@ type LeaseCandidateSpec struct {
// If multiple candidates for the same Lease return different strategies, the strategy provided
// by the candidate with the latest BinaryVersion will be used. If there is still conflict,
// this is a user error and coordinated leader election will not operate the Lease until resolved.
- // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
- // +featureGate=CoordinatedLeaderElection
// +required
Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"`
}
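
(Editorial aside, not part of the diff.) With the alpha feature-gate note dropped above, strategy is an ordinary required field. A sketch of populating it, assuming the `OldestEmulationVersion` constant from `k8s.io/api/coordination/v1` and the v1alpha2 field names implied by the surrounding hunks; the version strings are placeholders:

```go
package example

import (
	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
)

// candidateSpec fills the required strategy along with the version fields
// that the OldestEmulationVersion strategy compares across candidates.
func candidateSpec() coordinationv1alpha2.LeaseCandidateSpec {
	return coordinationv1alpha2.LeaseCandidateSpec{
		LeaseName:        "example-component", // name of the coordinated Lease
		BinaryVersion:    "1.33.0",            // semver, no leading "v"
		EmulationVersion: "1.33.0",            // required for OldestEmulationVersion
		Strategy:         coordinationv1.OldestEmulationVersion,
	}
}
```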
diff --git a/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
index 39534e6ad..f7e29849e 100644
--- a/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
@@ -54,7 +54,7 @@ var map_LeaseCandidateSpec = map[string]string{
"renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.",
"binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.",
"emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"",
- "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.",
+ "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.",
}
func (LeaseCandidateSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go
index e733411aa..cab8becf6 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/doc.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=coordination.k8s.io
-package v1beta1 // import "k8s.io/api/coordination/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
index bea9b8146..52fd4167f 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
@@ -74,10 +74,94 @@ func (m *Lease) XXX_DiscardUnknown() {
var xxx_messageInfo_Lease proto.InternalMessageInfo
+func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} }
+func (*LeaseCandidate) ProtoMessage() {}
+func (*LeaseCandidate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d4e223b8bb23da3, []int{1}
+}
+func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LeaseCandidate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseCandidate.Merge(m, src)
+}
+func (m *LeaseCandidate) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseCandidate) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseCandidate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo
+
+func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} }
+func (*LeaseCandidateList) ProtoMessage() {}
+func (*LeaseCandidateList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d4e223b8bb23da3, []int{2}
+}
+func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LeaseCandidateList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseCandidateList.Merge(m, src)
+}
+func (m *LeaseCandidateList) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseCandidateList) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo
+
+func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} }
+func (*LeaseCandidateSpec) ProtoMessage() {}
+func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d4e223b8bb23da3, []int{3}
+}
+func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseCandidateSpec.Merge(m, src)
+}
+func (m *LeaseCandidateSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseCandidateSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo
+
func (m *LeaseList) Reset() { *m = LeaseList{} }
func (*LeaseList) ProtoMessage() {}
func (*LeaseList) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{1}
+ return fileDescriptor_8d4e223b8bb23da3, []int{4}
}
func (m *LeaseList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -105,7 +189,7 @@ var xxx_messageInfo_LeaseList proto.InternalMessageInfo
func (m *LeaseSpec) Reset() { *m = LeaseSpec{} }
func (*LeaseSpec) ProtoMessage() {}
func (*LeaseSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{2}
+ return fileDescriptor_8d4e223b8bb23da3, []int{5}
}
func (m *LeaseSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -132,6 +216,9 @@ var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo
func init() {
proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease")
+ proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidate")
+ proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateList")
+ proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateSpec")
proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList")
proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec")
}
@@ -141,45 +228,54 @@ func init() {
}
var fileDescriptor_8d4e223b8bb23da3 = []byte{
- // 600 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e,
- 0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1,
- 0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae,
- 0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f,
- 0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61,
- 0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d,
- 0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e,
- 0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8,
- 0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf,
- 0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe,
- 0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f,
- 0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26,
- 0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51,
- 0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51,
- 0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26,
- 0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1,
- 0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89,
- 0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73,
- 0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57,
- 0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe,
- 0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f,
- 0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83,
- 0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76,
- 0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8,
- 0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f,
- 0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66,
- 0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea,
- 0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b,
- 0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd,
- 0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc,
- 0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee,
- 0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e,
- 0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05,
- 0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee,
- 0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea,
- 0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed,
- 0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
- 0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00,
+ // 750 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x4e, 0x1b, 0x39,
+ 0x18, 0xcd, 0x40, 0xb2, 0x9b, 0x38, 0x04, 0xb2, 0x5e, 0x56, 0x1a, 0x71, 0x31, 0x83, 0x72, 0xb1,
+ 0x42, 0x48, 0xeb, 0x59, 0x60, 0xb5, 0x5a, 0x6d, 0x55, 0xa9, 0x1d, 0x40, 0x2d, 0x6a, 0x68, 0x91,
+ 0xa1, 0x95, 0x5a, 0x21, 0xb5, 0xce, 0x8c, 0x99, 0xb8, 0x30, 0x3f, 0xf5, 0x38, 0x54, 0xb9, 0xeb,
+ 0x23, 0xf4, 0x69, 0x5a, 0xf5, 0x0d, 0xd2, 0x3b, 0x2e, 0xb9, 0x8a, 0xca, 0x54, 0xea, 0x43, 0xf4,
+ 0xaa, 0xb2, 0x33, 0xf9, 0x27, 0x22, 0x6d, 0x11, 0x77, 0xf1, 0xf7, 0x9d, 0x73, 0xfc, 0x1d, 0xfb,
+ 0x38, 0x1a, 0x60, 0x1d, 0xff, 0x17, 0x23, 0x16, 0x5a, 0x24, 0x62, 0x96, 0x13, 0x86, 0xdc, 0x65,
+ 0x01, 0x11, 0x2c, 0x0c, 0xac, 0xd3, 0xb5, 0x1a, 0x15, 0x64, 0xcd, 0xf2, 0x68, 0x40, 0x39, 0x11,
+ 0xd4, 0x45, 0x11, 0x0f, 0x45, 0x08, 0xcd, 0x0e, 0x01, 0x91, 0x88, 0xa1, 0x41, 0x02, 0x4a, 0x09,
+ 0x4b, 0x7f, 0x79, 0x4c, 0xd4, 0x1b, 0x35, 0xe4, 0x84, 0xbe, 0xe5, 0x85, 0x5e, 0x68, 0x29, 0x5e,
+ 0xad, 0x71, 0xa4, 0x56, 0x6a, 0xa1, 0x7e, 0x75, 0xf4, 0x96, 0x56, 0x27, 0x0f, 0x30, 0xba, 0xf7,
+ 0xd2, 0x3f, 0x7d, 0xac, 0x4f, 0x9c, 0x3a, 0x0b, 0x28, 0x6f, 0x5a, 0xd1, 0xb1, 0x27, 0x0b, 0xb1,
+ 0xe5, 0x53, 0x41, 0x2e, 0x63, 0x59, 0x93, 0x58, 0xbc, 0x11, 0x08, 0xe6, 0xd3, 0x31, 0xc2, 0xbf,
+ 0x57, 0x11, 0x62, 0xa7, 0x4e, 0x7d, 0x32, 0xca, 0xab, 0xbc, 0xd7, 0x40, 0xae, 0x4a, 0x49, 0x4c,
+ 0xe1, 0x0b, 0x90, 0x97, 0xd3, 0xb8, 0x44, 0x10, 0x5d, 0x5b, 0xd6, 0x56, 0x8a, 0xeb, 0x7f, 0xa3,
+ 0xfe, 0xb9, 0xf5, 0x44, 0x51, 0x74, 0xec, 0xc9, 0x42, 0x8c, 0x24, 0x1a, 0x9d, 0xae, 0xa1, 0x47,
+ 0xb5, 0x97, 0xd4, 0x11, 0xbb, 0x54, 0x10, 0x1b, 0xb6, 0xda, 0x66, 0x26, 0x69, 0x9b, 0xa0, 0x5f,
+ 0xc3, 0x3d, 0x55, 0x58, 0x05, 0xd9, 0x38, 0xa2, 0x8e, 0x3e, 0xa3, 0xd4, 0x57, 0xd1, 0x15, 0xb7,
+ 0x82, 0xd4, 0x5c, 0xfb, 0x11, 0x75, 0xec, 0xb9, 0x54, 0x37, 0x2b, 0x57, 0x58, 0xa9, 0x54, 0x3e,
+ 0x6a, 0x60, 0x5e, 0x21, 0x36, 0x49, 0xe0, 0x32, 0x97, 0x88, 0x9b, 0xb0, 0xf0, 0x78, 0xc8, 0xc2,
+ 0xc6, 0x74, 0x16, 0x7a, 0x03, 0x4e, 0xf4, 0xd2, 0xd2, 0x00, 0x1c, 0x86, 0x56, 0x59, 0x2c, 0xe0,
+ 0xe1, 0x98, 0x1f, 0x34, 0x9d, 0x1f, 0xc9, 0x56, 0x6e, 0xca, 0xe9, 0x66, 0xf9, 0x6e, 0x65, 0xc0,
+ 0xcb, 0x01, 0xc8, 0x31, 0x41, 0xfd, 0x58, 0x9f, 0x59, 0x9e, 0x5d, 0x29, 0xae, 0x5b, 0xdf, 0x69,
+ 0xc6, 0x2e, 0xa5, 0xda, 0xb9, 0x1d, 0xa9, 0x82, 0x3b, 0x62, 0x95, 0x2f, 0xb3, 0xa3, 0x56, 0xa4,
+ 0x4f, 0x68, 0x81, 0xc2, 0x89, 0xac, 0x3e, 0x24, 0x3e, 0x55, 0x5e, 0x0a, 0xf6, 0x6f, 0x29, 0xbf,
+ 0x50, 0xed, 0x36, 0x70, 0x1f, 0x03, 0x9f, 0x82, 0x7c, 0xc4, 0x02, 0xef, 0x80, 0xf9, 0x34, 0x3d,
+ 0x6d, 0x6b, 0x3a, 0xef, 0xbb, 0xcc, 0xe1, 0xa1, 0xa4, 0xd9, 0x73, 0xd2, 0xf8, 0x5e, 0x2a, 0x82,
+ 0x7b, 0x72, 0xf0, 0x10, 0x14, 0x38, 0x0d, 0xe8, 0x6b, 0xa5, 0x3d, 0xfb, 0x63, 0xda, 0x25, 0x39,
+ 0x38, 0xee, 0xaa, 0xe0, 0xbe, 0x20, 0xbc, 0x05, 0x4a, 0x35, 0x16, 0x10, 0xde, 0x7c, 0x42, 0x79,
+ 0xcc, 0xc2, 0x40, 0xcf, 0x2a, 0xb7, 0x7f, 0xa4, 0x6e, 0x4b, 0xf6, 0x60, 0x13, 0x0f, 0x63, 0xe1,
+ 0x16, 0x28, 0x53, 0xbf, 0x71, 0xa2, 0xce, 0xbd, 0xcb, 0xcf, 0x29, 0xbe, 0x9e, 0xf2, 0xcb, 0xdb,
+ 0x23, 0x7d, 0x3c, 0xc6, 0x80, 0x0e, 0xc8, 0xc7, 0x42, 0xbe, 0x72, 0xaf, 0xa9, 0xff, 0xa2, 0xd8,
+ 0xf7, 0xba, 0x39, 0xd8, 0x4f, 0xeb, 0x5f, 0xdb, 0xe6, 0xc6, 0xe4, 0x7f, 0x31, 0xb4, 0xd9, 0x5d,
+ 0x53, 0xb7, 0xf3, 0x0a, 0x53, 0x1a, 0xee, 0x09, 0x57, 0xde, 0x69, 0xa0, 0x73, 0x73, 0x37, 0x10,
+ 0xd5, 0x07, 0xc3, 0x51, 0xfd, 0x73, 0xba, 0xa8, 0x4e, 0x48, 0xe8, 0x87, 0x6c, 0x3a, 0xb8, 0x0a,
+ 0xe6, 0xff, 0x60, 0xbe, 0x1e, 0x9e, 0xb8, 0x94, 0xef, 0xb8, 0x34, 0x10, 0x4c, 0x34, 0xd3, 0x74,
+ 0xc2, 0xa4, 0x6d, 0xce, 0xdf, 0x1f, 0xea, 0xe0, 0x11, 0x24, 0xac, 0x82, 0x45, 0x15, 0xd8, 0xad,
+ 0x06, 0x57, 0xdb, 0xef, 0x53, 0x27, 0x0c, 0xdc, 0x58, 0xe5, 0x35, 0x67, 0xeb, 0x49, 0xdb, 0x5c,
+ 0xac, 0x5e, 0xd2, 0xc7, 0x97, 0xb2, 0x60, 0x0d, 0x14, 0x89, 0xf3, 0xaa, 0xc1, 0x38, 0xfd, 0x99,
+ 0x60, 0x2e, 0x24, 0x6d, 0xb3, 0x78, 0xb7, 0xaf, 0x83, 0x07, 0x45, 0x87, 0xa3, 0x9f, 0xbd, 0xee,
+ 0xe8, 0xdf, 0x01, 0x65, 0xe5, 0xec, 0x80, 0x93, 0x20, 0x66, 0xd2, 0x5b, 0xac, 0xd2, 0x9b, 0xb3,
+ 0x17, 0x65, 0x72, 0xab, 0x23, 0x3d, 0x3c, 0x86, 0x86, 0xcf, 0xc7, 0x92, 0xbb, 0x79, 0xad, 0xa9,
+ 0x85, 0xb7, 0xc1, 0x42, 0xc4, 0xe9, 0x11, 0xe5, 0x9c, 0xba, 0x9d, 0xdb, 0xd5, 0x7f, 0x55, 0xfb,
+ 0xfc, 0x9e, 0xb4, 0xcd, 0x85, 0xbd, 0xe1, 0x16, 0x1e, 0xc5, 0xda, 0xdb, 0xad, 0x0b, 0x23, 0x73,
+ 0x76, 0x61, 0x64, 0xce, 0x2f, 0x8c, 0xcc, 0x9b, 0xc4, 0xd0, 0x5a, 0x89, 0xa1, 0x9d, 0x25, 0x86,
+ 0x76, 0x9e, 0x18, 0xda, 0xa7, 0xc4, 0xd0, 0xde, 0x7e, 0x36, 0x32, 0xcf, 0xcc, 0x2b, 0x3e, 0x50,
+ 0xbe, 0x05, 0x00, 0x00, 0xff, 0xff, 0xff, 0x56, 0x51, 0x57, 0xc2, 0x08, 0x00, 0x00,
}
func (m *Lease) Marshal() (dAtA []byte, err error) {
@@ -225,6 +321,163 @@ func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Strategy)
+ copy(dAtA[i:], m.Strategy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.EmulationVersion)
+ copy(dAtA[i:], m.EmulationVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.BinaryVersion)
+ copy(dAtA[i:], m.BinaryVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion)))
+ i--
+ dAtA[i] = 0x22
+ if m.RenewTime != nil {
+ {
+ size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.PingTime != nil {
+ {
+ size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.LeaseName)
+ copy(dAtA[i:], m.LeaseName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *LeaseList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -374,6 +627,61 @@ func (m *Lease) Size() (n int) {
return n
}
+func (m *LeaseCandidate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *LeaseCandidateList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LeaseCandidateSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.LeaseName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.PingTime != nil {
+ l = m.PingTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RenewTime != nil {
+ l = m.RenewTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.BinaryVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.EmulationVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Strategy)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *LeaseList) Size() (n int) {
if m == nil {
return 0
@@ -443,6 +751,48 @@ func (this *Lease) String() string {
}, "")
return s
}
+func (this *LeaseCandidate) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LeaseCandidate{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LeaseCandidateList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]LeaseCandidate{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&LeaseCandidateList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LeaseCandidateSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LeaseCandidateSpec{`,
+ `LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`,
+ `PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`,
+ `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`,
+ `BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`,
+ `EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`,
+ `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *LeaseList) String() string {
if this == nil {
return "nil"
@@ -599,6 +949,489 @@ func (m *Lease) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *LeaseCandidate) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, LeaseCandidate{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LeaseName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PingTime == nil {
+ m.PingTime = &v1.MicroTime{}
+ }
+ if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RenewTime == nil {
+ m.RenewTime = &v1.MicroTime{}
+ }
+ if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BinaryVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EmulationVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Strategy = k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *LeaseList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
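Note: every generated Unmarshal above follows the same protobuf wire-format pattern: accumulate a varint key, split it into a field number and a wire type, then dispatch on the field. A minimal standalone sketch of that key decoding — illustrative only, not part of the vendored code:

package main

import (
	"errors"
	"fmt"
)

// decodeKey reads one varint-encoded field key from b, mirroring the
// shift-accumulate loop used by the generated Unmarshal methods above.
func decodeKey(b []byte) (fieldNum int32, wireType int, n int, err error) {
	var wire uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, 0, errors.New("integer overflow") // ErrIntOverflowGenerated in the vendored code
		}
		if n >= len(b) {
			return 0, 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		wire |= uint64(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	return int32(wire >> 3), int(wire & 0x7), n, nil
}

func main() {
	// 0x32 is the key the generated code writes for LeaseCandidateSpec.Strategy:
	// field 6, wire type 2 (length-delimited).
	f, w, _, _ := decodeKey([]byte{0x32})
	fmt.Println(f, w) // 6 2
}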
diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
index 088811a74..7ca043f52 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto
@@ -41,6 +41,75 @@ message Lease {
optional LeaseSpec spec = 2;
}
+// LeaseCandidate defines a candidate for a Lease object.
+// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
+message LeaseCandidate {
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec contains the specification of the Lease.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional LeaseCandidateSpec spec = 2;
+}
+
+// LeaseCandidateList is a list of Lease objects.
+message LeaseCandidateList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a list of schema objects.
+ repeated LeaseCandidate items = 2;
+}
+
+// LeaseCandidateSpec is a specification of a Lease.
+message LeaseCandidateSpec {
+ // LeaseName is the name of the lease for which this candidate is contending.
+ // The limits on this field are the same as on Lease.name. Multiple lease candidates
+ // may reference the same Lease.name.
+ // This field is immutable.
+ // +required
+ optional string leaseName = 1;
+
+ // PingTime is the last time that the server has requested the LeaseCandidate
+ // to renew. It is only done during leader election to check if any
+ // LeaseCandidates have become ineligible. When PingTime is updated, the
+ // LeaseCandidate will respond by updating RenewTime.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2;
+
+ // RenewTime is the time that the LeaseCandidate was last updated.
+ // Any time a Lease needs to do leader election, the PingTime field
+ // is updated to signal to the LeaseCandidate that they should update
+ // the RenewTime.
+ // Old LeaseCandidate objects are also garbage collected if it has been hours
+ // since the last renew. The PingTime field is updated regularly to prevent
+ // garbage collection for still active LeaseCandidates.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3;
+
+ // BinaryVersion is the binary version. It must be in a semver format without leading `v`.
+ // This field is required.
+ // +required
+ optional string binaryVersion = 4;
+
+ // EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
+ // EmulationVersion must be less than or equal to BinaryVersion.
+ // This field is required when strategy is "OldestEmulationVersion"
+ // +optional
+ optional string emulationVersion = 5;
+
+ // Strategy is the strategy that coordinated leader election will use for picking the leader.
+ // If multiple candidates for the same Lease return different strategies, the strategy provided
+ // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
+ // this is a user error and coordinated leader election will not operate the Lease until resolved.
+ // +required
+ optional string strategy = 6;
+}
+
// LeaseList is a list of Lease objects.
message LeaseList {
// Standard list metadata.
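Note: the field numbers declared in LeaseCandidateSpec above (1–6) explain the literal key bytes written by the generated MarshalToSizedBuffer earlier (0xa, 0x12, 0x1a, 0x22, 0x2a, 0x32): each key is (fieldNumber << 3) | wireType, with wire type 2 for length-delimited fields. A quick sketch:

package main

import "fmt"

func main() {
	// Keys the generated MarshalToSizedBuffer writes for LeaseCandidateSpec,
	// computed as (fieldNumber << 3) | wireType; wire type 2 = length-delimited.
	fields := map[int]string{1: "leaseName", 2: "pingTime", 3: "renewTime",
		4: "binaryVersion", 5: "emulationVersion", 6: "strategy"}
	for n := 1; n <= 6; n++ {
		fmt.Printf("field %d (%s): key 0x%x\n", n, fields[n], n<<3|2)
	}
	// Prints 0xa, 0x12, 0x1a, 0x22, 0x2a, 0x32 — the tag bytes in the Marshal hunk.
}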
diff --git a/vendor/k8s.io/api/coordination/v1beta1/register.go b/vendor/k8s.io/api/coordination/v1beta1/register.go
index 85efaa64e..bd0016423 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/register.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/register.go
@@ -46,6 +46,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Lease{},
&LeaseList{},
+ &LeaseCandidate{},
+ &LeaseCandidateList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
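Note: with the two new types registered in addKnownTypes, a runtime.Scheme built from this package recognizes them. A hedged sketch, assuming the standard register.go layout (exported AddToScheme and SchemeGroupVersion) in the vendored package:

package main

import (
	"fmt"

	coordv1beta1 "k8s.io/api/coordination/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes, which now includes
	// LeaseCandidate and LeaseCandidateList.
	if err := coordv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := coordv1beta1.SchemeGroupVersion.WithKind("LeaseCandidate")
	fmt.Println(scheme.Recognizes(gvk)) // true
}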
diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go
index d63fc30a9..781d29efc 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/types.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/types.go
@@ -91,3 +91,76 @@ type LeaseList struct {
// items is a list of schema objects.
Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+
+// LeaseCandidate defines a candidate for a Lease object.
+// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
+type LeaseCandidate struct {
+ metav1.TypeMeta `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec contains the specification of the Lease.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// LeaseCandidateSpec is a specification of a Lease.
+type LeaseCandidateSpec struct {
+ // LeaseName is the name of the lease for which this candidate is contending.
+ // The limits on this field are the same as on Lease.name. Multiple lease candidates
+ // may reference the same Lease.name.
+ // This field is immutable.
+ // +required
+ LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"`
+ // PingTime is the last time that the server has requested the LeaseCandidate
+ // to renew. It is only done during leader election to check if any
+ // LeaseCandidates have become ineligible. When PingTime is updated, the
+ // LeaseCandidate will respond by updating RenewTime.
+ // +optional
+ PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"`
+ // RenewTime is the time that the LeaseCandidate was last updated.
+ // Any time a Lease needs to do leader election, the PingTime field
+ // is updated to signal to the LeaseCandidate that they should update
+ // the RenewTime.
+ // Old LeaseCandidate objects are also garbage collected if it has been hours
+ // since the last renew. The PingTime field is updated regularly to prevent
+ // garbage collection for still active LeaseCandidates.
+ // +optional
+ RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"`
+ // BinaryVersion is the binary version. It must be in a semver format without leading `v`.
+ // This field is required.
+ // +required
+ BinaryVersion string `json:"binaryVersion" protobuf:"bytes,4,name=binaryVersion"`
+ // EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
+ // EmulationVersion must be less than or equal to BinaryVersion.
+ // This field is required when strategy is "OldestEmulationVersion"
+ // +optional
+ EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"`
+ // Strategy is the strategy that coordinated leader election will use for picking the leader.
+ // If multiple candidates for the same Lease return different strategies, the strategy provided
+ // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
+ // this is a user error and coordinated leader election will not operate the Lease until resolved.
+ // +required
+ Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+
+// LeaseCandidateList is a list of Lease objects.
+type LeaseCandidateList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of schema objects.
+ Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
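Note: a minimal sketch of constructing the new v1beta1 LeaseCandidate from the types above; the type and field names come from the vendored code, while the lease name and version strings are illustrative values only:

package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordv1beta1 "k8s.io/api/coordination/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.NowMicro()
	candidate := coordv1beta1.LeaseCandidate{
		ObjectMeta: metav1.ObjectMeta{Name: "apiserver-1", Namespace: "kube-system"},
		Spec: coordv1beta1.LeaseCandidateSpec{
			LeaseName:        "leader-election-lease", // must match the contended Lease's name
			RenewTime:        &now,
			BinaryVersion:    "1.33.0", // semver, no leading "v"
			EmulationVersion: "1.33.0", // required for the OldestEmulationVersion strategy
			Strategy:         coordinationv1.OldestEmulationVersion,
		},
	}
	fmt.Println(candidate.Spec.LeaseName, candidate.Spec.Strategy)
}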
diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
index 50fe8ea18..35812b77f 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
@@ -37,6 +37,40 @@ func (Lease) SwaggerDoc() map[string]string {
return map_Lease
}
+var map_LeaseCandidate = map[string]string{
+ "": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.",
+ "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (LeaseCandidate) SwaggerDoc() map[string]string {
+ return map_LeaseCandidate
+}
+
+var map_LeaseCandidateList = map[string]string{
+ "": "LeaseCandidateList is a list of Lease objects.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of schema objects.",
+}
+
+func (LeaseCandidateList) SwaggerDoc() map[string]string {
+ return map_LeaseCandidateList
+}
+
+var map_LeaseCandidateSpec = map[string]string{
+ "": "LeaseCandidateSpec is a specification of a Lease.",
+ "leaseName": "LeaseName is the name of the lease for which this candidate is contending. The limits on this field are the same as on Lease.name. Multiple lease candidates may reference the same Lease.name. This field is immutable.",
+ "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.",
+ "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.",
+ "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.",
+ "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"",
+ "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.",
+}
+
+func (LeaseCandidateSpec) SwaggerDoc() map[string]string {
+ return map_LeaseCandidateSpec
+}
+
var map_LeaseList = map[string]string{
"": "LeaseList is a list of Lease objects.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
index dcef1e346..b990ee247 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
@@ -53,6 +53,90 @@ func (in *Lease) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate.
+func (in *LeaseCandidate) DeepCopy() *LeaseCandidate {
+ if in == nil {
+ return nil
+ }
+ out := new(LeaseCandidate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LeaseCandidate) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]LeaseCandidate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList.
+func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList {
+ if in == nil {
+ return nil
+ }
+ out := new(LeaseCandidateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LeaseCandidateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) {
+ *out = *in
+ if in.PingTime != nil {
+ in, out := &in.PingTime, &out.PingTime
+ *out = (*in).DeepCopy()
+ }
+ if in.RenewTime != nil {
+ in, out := &in.RenewTime, &out.RenewTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec.
+func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LeaseCandidateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseList) DeepCopyInto(out *LeaseList) {
*out = *in
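Note: the generated DeepCopyInto for LeaseCandidateSpec clones the pointer fields (PingTime, RenewTime) rather than aliasing them. A small sketch demonstrating that, with illustrative values:

package main

import (
	"fmt"

	coordv1beta1 "k8s.io/api/coordination/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.NowMicro()
	orig := &coordv1beta1.LeaseCandidate{
		Spec: coordv1beta1.LeaseCandidateSpec{RenewTime: &now},
	}
	cp := orig.DeepCopy()
	// The MicroTime was cloned, not aliased, so the pointers differ and
	// mutating cp.Spec.RenewTime cannot affect the original.
	fmt.Println(orig.Spec.RenewTime == cp.Spec.RenewTime) // false
}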
diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go
index 18926aa10..73636edfa 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -49,6 +49,42 @@ func (in *Lease) APILifecycleRemoved() (major, minor int) {
return 1, 22
}
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) {
+ return 1, 36
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) {
+ return 1, 39
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 36
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) {
+ return 1, 39
+}
+
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *LeaseList) APILifecycleIntroduced() (major, minor int) {
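Note: the lifecycle methods above encode the generator's default schedule of deprecation at introduced+3 minors and removal three minors after that (1.33 → 1.36 → 1.39 for LeaseCandidate). A small sketch reading them back:

package main

import (
	"fmt"

	coordv1beta1 "k8s.io/api/coordination/v1beta1"
)

func main() {
	c := &coordv1beta1.LeaseCandidate{}
	maj, min := c.APILifecycleIntroduced() // 1, 33
	dmaj, dmin := c.APILifecycleDeprecated()
	rmaj, rmin := c.APILifecycleRemoved()
	// deprecated = introduced + 3 minors, removed = deprecated + 3 minors
	fmt.Printf("v%d.%d -> deprecated v%d.%d -> removed v%d.%d\n",
		maj, min, dmaj, dmin, rmaj, rmin)
}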
diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go
index bc0041b33..e4e9196ae 100644
--- a/vendor/k8s.io/api/core/v1/doc.go
+++ b/vendor/k8s.io/api/core/v1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=
// Package v1 is the v1 version of the core API.
-package v1 // import "k8s.io/api/core/v1"
+package v1
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index 9d466c6d7..a4b8f5842 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -3213,10 +3213,38 @@ func (m *NodeStatus) XXX_DiscardUnknown() {
var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
+func (m *NodeSwapStatus) Reset() { *m = NodeSwapStatus{} }
+func (*NodeSwapStatus) ProtoMessage() {}
+func (*NodeSwapStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6c07b07c062484ab, []int{113}
+}
+func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NodeSwapStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NodeSwapStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NodeSwapStatus.Merge(m, src)
+}
+func (m *NodeSwapStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *NodeSwapStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_NodeSwapStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo
+
func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
func (*NodeSystemInfo) ProtoMessage() {}
func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{113}
+ return fileDescriptor_6c07b07c062484ab, []int{114}
}
func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3244,7 +3272,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
func (*ObjectFieldSelector) ProtoMessage() {}
func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{114}
+ return fileDescriptor_6c07b07c062484ab, []int{115}
}
func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3272,7 +3300,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
func (m *ObjectReference) Reset() { *m = ObjectReference{} }
func (*ObjectReference) ProtoMessage() {}
func (*ObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{115}
+ return fileDescriptor_6c07b07c062484ab, []int{116}
}
func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3300,7 +3328,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
func (*PersistentVolume) ProtoMessage() {}
func (*PersistentVolume) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{116}
+ return fileDescriptor_6c07b07c062484ab, []int{117}
}
func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3328,7 +3356,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
func (*PersistentVolumeClaim) ProtoMessage() {}
func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{117}
+ return fileDescriptor_6c07b07c062484ab, []int{118}
}
func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3356,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
func (*PersistentVolumeClaimCondition) ProtoMessage() {}
func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{118}
+ return fileDescriptor_6c07b07c062484ab, []int{119}
}
func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3384,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
func (*PersistentVolumeClaimList) ProtoMessage() {}
func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{119}
+ return fileDescriptor_6c07b07c062484ab, []int{120}
}
func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3412,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
func (*PersistentVolumeClaimSpec) ProtoMessage() {}
func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{120}
+ return fileDescriptor_6c07b07c062484ab, []int{121}
}
func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3440,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
func (*PersistentVolumeClaimStatus) ProtoMessage() {}
func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{121}
+ return fileDescriptor_6c07b07c062484ab, []int{122}
}
func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3468,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} }
func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{122}
+ return fileDescriptor_6c07b07c062484ab, []int{123}
}
func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3496,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{123}
+ return fileDescriptor_6c07b07c062484ab, []int{124}
}
func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3524,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
func (*PersistentVolumeList) ProtoMessage() {}
func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{124}
+ return fileDescriptor_6c07b07c062484ab, []int{125}
}
func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3552,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
func (*PersistentVolumeSource) ProtoMessage() {}
func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{125}
+ return fileDescriptor_6c07b07c062484ab, []int{126}
}
func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3580,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
func (*PersistentVolumeSpec) ProtoMessage() {}
func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{126}
+ return fileDescriptor_6c07b07c062484ab, []int{127}
}
func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3608,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
func (*PersistentVolumeStatus) ProtoMessage() {}
func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{127}
+ return fileDescriptor_6c07b07c062484ab, []int{128}
}
func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3636,7 +3664,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{128}
+ return fileDescriptor_6c07b07c062484ab, []int{129}
}
func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3664,7 +3692,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
func (m *Pod) Reset() { *m = Pod{} }
func (*Pod) ProtoMessage() {}
func (*Pod) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{129}
+ return fileDescriptor_6c07b07c062484ab, []int{130}
}
func (m *Pod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3692,7 +3720,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
func (m *PodAffinity) Reset() { *m = PodAffinity{} }
func (*PodAffinity) ProtoMessage() {}
func (*PodAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{130}
+ return fileDescriptor_6c07b07c062484ab, []int{131}
}
func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3720,7 +3748,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
func (*PodAffinityTerm) ProtoMessage() {}
func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{131}
+ return fileDescriptor_6c07b07c062484ab, []int{132}
}
func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
func (*PodAntiAffinity) ProtoMessage() {}
func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{132}
+ return fileDescriptor_6c07b07c062484ab, []int{133}
}
func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
func (*PodAttachOptions) ProtoMessage() {}
func (*PodAttachOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{133}
+ return fileDescriptor_6c07b07c062484ab, []int{134}
}
func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
func (m *PodCondition) Reset() { *m = PodCondition{} }
func (*PodCondition) ProtoMessage() {}
func (*PodCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{134}
+ return fileDescriptor_6c07b07c062484ab, []int{135}
}
func (m *PodCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3832,7 +3860,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
func (*PodDNSConfig) ProtoMessage() {}
func (*PodDNSConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{135}
+ return fileDescriptor_6c07b07c062484ab, []int{136}
}
func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3860,7 +3888,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
func (*PodDNSConfigOption) ProtoMessage() {}
func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{136}
+ return fileDescriptor_6c07b07c062484ab, []int{137}
}
func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3888,7 +3916,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
func (*PodExecOptions) ProtoMessage() {}
func (*PodExecOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{137}
+ return fileDescriptor_6c07b07c062484ab, []int{138}
}
func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3916,7 +3944,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
func (m *PodIP) Reset() { *m = PodIP{} }
func (*PodIP) ProtoMessage() {}
func (*PodIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{138}
+ return fileDescriptor_6c07b07c062484ab, []int{139}
}
func (m *PodIP) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3944,7 +3972,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
func (m *PodList) Reset() { *m = PodList{} }
func (*PodList) ProtoMessage() {}
func (*PodList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{139}
+ return fileDescriptor_6c07b07c062484ab, []int{140}
}
func (m *PodList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3972,7 +4000,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
func (*PodLogOptions) ProtoMessage() {}
func (*PodLogOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{140}
+ return fileDescriptor_6c07b07c062484ab, []int{141}
}
func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4000,7 +4028,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
func (m *PodOS) Reset() { *m = PodOS{} }
func (*PodOS) ProtoMessage() {}
func (*PodOS) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{141}
+ return fileDescriptor_6c07b07c062484ab, []int{142}
}
func (m *PodOS) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4028,7 +4056,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
func (*PodPortForwardOptions) ProtoMessage() {}
func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{142}
+ return fileDescriptor_6c07b07c062484ab, []int{143}
}
func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4056,7 +4084,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
func (*PodProxyOptions) ProtoMessage() {}
func (*PodProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{143}
+ return fileDescriptor_6c07b07c062484ab, []int{144}
}
func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4084,7 +4112,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
func (*PodReadinessGate) ProtoMessage() {}
func (*PodReadinessGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{144}
+ return fileDescriptor_6c07b07c062484ab, []int{145}
}
func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4112,7 +4140,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} }
func (*PodResourceClaim) ProtoMessage() {}
func (*PodResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{145}
+ return fileDescriptor_6c07b07c062484ab, []int{146}
}
func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4140,7 +4168,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} }
func (*PodResourceClaimStatus) ProtoMessage() {}
func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{146}
+ return fileDescriptor_6c07b07c062484ab, []int{147}
}
func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4168,7 +4196,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} }
func (*PodSchedulingGate) ProtoMessage() {}
func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{147}
+ return fileDescriptor_6c07b07c062484ab, []int{148}
}
func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4196,7 +4224,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
func (*PodSecurityContext) ProtoMessage() {}
func (*PodSecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{148}
+ return fileDescriptor_6c07b07c062484ab, []int{149}
}
func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4224,7 +4252,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
func (m *PodSignature) Reset() { *m = PodSignature{} }
func (*PodSignature) ProtoMessage() {}
func (*PodSignature) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{149}
+ return fileDescriptor_6c07b07c062484ab, []int{150}
}
func (m *PodSignature) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4252,7 +4280,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
func (m *PodSpec) Reset() { *m = PodSpec{} }
func (*PodSpec) ProtoMessage() {}
func (*PodSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{150}
+ return fileDescriptor_6c07b07c062484ab, []int{151}
}
func (m *PodSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4280,7 +4308,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
func (m *PodStatus) Reset() { *m = PodStatus{} }
func (*PodStatus) ProtoMessage() {}
func (*PodStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{151}
+ return fileDescriptor_6c07b07c062484ab, []int{152}
}
func (m *PodStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4308,7 +4336,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
func (*PodStatusResult) ProtoMessage() {}
func (*PodStatusResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{152}
+ return fileDescriptor_6c07b07c062484ab, []int{153}
}
func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4336,7 +4364,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
func (m *PodTemplate) Reset() { *m = PodTemplate{} }
func (*PodTemplate) ProtoMessage() {}
func (*PodTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{153}
+ return fileDescriptor_6c07b07c062484ab, []int{154}
}
func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4364,7 +4392,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
func (*PodTemplateList) ProtoMessage() {}
func (*PodTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{154}
+ return fileDescriptor_6c07b07c062484ab, []int{155}
}
func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4392,7 +4420,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
func (*PodTemplateSpec) ProtoMessage() {}
func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{155}
+ return fileDescriptor_6c07b07c062484ab, []int{156}
}
func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4420,7 +4448,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
func (m *PortStatus) Reset() { *m = PortStatus{} }
func (*PortStatus) ProtoMessage() {}
func (*PortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{156}
+ return fileDescriptor_6c07b07c062484ab, []int{157}
}
func (m *PortStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4448,7 +4476,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
func (*PortworxVolumeSource) ProtoMessage() {}
func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{157}
+ return fileDescriptor_6c07b07c062484ab, []int{158}
}
func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4476,7 +4504,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{158}
+ return fileDescriptor_6c07b07c062484ab, []int{159}
}
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4504,7 +4532,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
func (*PreferAvoidPodsEntry) ProtoMessage() {}
func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{159}
+ return fileDescriptor_6c07b07c062484ab, []int{160}
}
func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4532,7 +4560,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
func (*PreferredSchedulingTerm) ProtoMessage() {}
func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{160}
+ return fileDescriptor_6c07b07c062484ab, []int{161}
}
func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4560,7 +4588,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
func (m *Probe) Reset() { *m = Probe{} }
func (*Probe) ProtoMessage() {}
func (*Probe) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{161}
+ return fileDescriptor_6c07b07c062484ab, []int{162}
}
func (m *Probe) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4588,7 +4616,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
func (m *ProbeHandler) Reset() { *m = ProbeHandler{} }
func (*ProbeHandler) ProtoMessage() {}
func (*ProbeHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{162}
+ return fileDescriptor_6c07b07c062484ab, []int{163}
}
func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4616,7 +4644,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
func (*ProjectedVolumeSource) ProtoMessage() {}
func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{163}
+ return fileDescriptor_6c07b07c062484ab, []int{164}
}
func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4644,7 +4672,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
func (*QuobyteVolumeSource) ProtoMessage() {}
func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{164}
+ return fileDescriptor_6c07b07c062484ab, []int{165}
}
func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4672,7 +4700,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
func (*RBDPersistentVolumeSource) ProtoMessage() {}
func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{165}
+ return fileDescriptor_6c07b07c062484ab, []int{166}
}
func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4700,7 +4728,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
func (*RBDVolumeSource) ProtoMessage() {}
func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{166}
+ return fileDescriptor_6c07b07c062484ab, []int{167}
}
func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4728,7 +4756,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
func (*RangeAllocation) ProtoMessage() {}
func (*RangeAllocation) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{167}
+ return fileDescriptor_6c07b07c062484ab, []int{168}
}
func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4756,7 +4784,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
func (m *ReplicationController) Reset() { *m = ReplicationController{} }
func (*ReplicationController) ProtoMessage() {}
func (*ReplicationController) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{168}
+ return fileDescriptor_6c07b07c062484ab, []int{169}
}
func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4784,7 +4812,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
func (*ReplicationControllerCondition) ProtoMessage() {}
func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{169}
+ return fileDescriptor_6c07b07c062484ab, []int{170}
}
func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4812,7 +4840,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
func (*ReplicationControllerList) ProtoMessage() {}
func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{170}
+ return fileDescriptor_6c07b07c062484ab, []int{171}
}
func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4840,7 +4868,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
func (*ReplicationControllerSpec) ProtoMessage() {}
func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{171}
+ return fileDescriptor_6c07b07c062484ab, []int{172}
}
func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4868,7 +4896,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
func (*ReplicationControllerStatus) ProtoMessage() {}
func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{172}
+ return fileDescriptor_6c07b07c062484ab, []int{173}
}
func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4896,7 +4924,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
func (*ResourceClaim) ProtoMessage() {}
func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{173}
+ return fileDescriptor_6c07b07c062484ab, []int{174}
}
func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4924,7 +4952,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
func (*ResourceFieldSelector) ProtoMessage() {}
func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{174}
+ return fileDescriptor_6c07b07c062484ab, []int{175}
}
func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4952,7 +4980,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
func (m *ResourceHealth) Reset() { *m = ResourceHealth{} }
func (*ResourceHealth) ProtoMessage() {}
func (*ResourceHealth) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{175}
+ return fileDescriptor_6c07b07c062484ab, []int{176}
}
func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4980,7 +5008,7 @@ var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
func (*ResourceQuota) ProtoMessage() {}
func (*ResourceQuota) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{176}
+ return fileDescriptor_6c07b07c062484ab, []int{177}
}
func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5008,7 +5036,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
func (*ResourceQuotaList) ProtoMessage() {}
func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{177}
+ return fileDescriptor_6c07b07c062484ab, []int{178}
}
func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5036,7 +5064,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
func (*ResourceQuotaSpec) ProtoMessage() {}
func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{178}
+ return fileDescriptor_6c07b07c062484ab, []int{179}
}
func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5064,7 +5092,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
func (*ResourceQuotaStatus) ProtoMessage() {}
func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{179}
+ return fileDescriptor_6c07b07c062484ab, []int{180}
}
func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5092,7 +5120,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
func (*ResourceRequirements) ProtoMessage() {}
func (*ResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{180}
+ return fileDescriptor_6c07b07c062484ab, []int{181}
}
func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5120,7 +5148,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
func (m *ResourceStatus) Reset() { *m = ResourceStatus{} }
func (*ResourceStatus) ProtoMessage() {}
func (*ResourceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{181}
+ return fileDescriptor_6c07b07c062484ab, []int{182}
}
func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5148,7 +5176,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
func (*SELinuxOptions) ProtoMessage() {}
func (*SELinuxOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{182}
+ return fileDescriptor_6c07b07c062484ab, []int{183}
}
func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5176,7 +5204,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{183}
+ return fileDescriptor_6c07b07c062484ab, []int{184}
}
func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5204,7 +5232,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
func (*ScaleIOVolumeSource) ProtoMessage() {}
func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{184}
+ return fileDescriptor_6c07b07c062484ab, []int{185}
}
func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5232,7 +5260,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
func (*ScopeSelector) ProtoMessage() {}
func (*ScopeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{185}
+ return fileDescriptor_6c07b07c062484ab, []int{186}
}
func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5260,7 +5288,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{186}
+ return fileDescriptor_6c07b07c062484ab, []int{187}
}
func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5288,7 +5316,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
func (*SeccompProfile) ProtoMessage() {}
func (*SeccompProfile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{187}
+ return fileDescriptor_6c07b07c062484ab, []int{188}
}
func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5316,7 +5344,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
func (m *Secret) Reset() { *m = Secret{} }
func (*Secret) ProtoMessage() {}
func (*Secret) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{188}
+ return fileDescriptor_6c07b07c062484ab, []int{189}
}
func (m *Secret) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5344,7 +5372,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
func (*SecretEnvSource) ProtoMessage() {}
func (*SecretEnvSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{189}
+ return fileDescriptor_6c07b07c062484ab, []int{190}
}
func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5372,7 +5400,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
func (*SecretKeySelector) ProtoMessage() {}
func (*SecretKeySelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{190}
+ return fileDescriptor_6c07b07c062484ab, []int{191}
}
func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5400,7 +5428,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
func (m *SecretList) Reset() { *m = SecretList{} }
func (*SecretList) ProtoMessage() {}
func (*SecretList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{191}
+ return fileDescriptor_6c07b07c062484ab, []int{192}
}
func (m *SecretList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5428,7 +5456,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
func (m *SecretProjection) Reset() { *m = SecretProjection{} }
func (*SecretProjection) ProtoMessage() {}
func (*SecretProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{192}
+ return fileDescriptor_6c07b07c062484ab, []int{193}
}
func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5456,7 +5484,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
func (m *SecretReference) Reset() { *m = SecretReference{} }
func (*SecretReference) ProtoMessage() {}
func (*SecretReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{193}
+ return fileDescriptor_6c07b07c062484ab, []int{194}
}
func (m *SecretReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5484,7 +5512,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
func (*SecretVolumeSource) ProtoMessage() {}
func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{194}
+ return fileDescriptor_6c07b07c062484ab, []int{195}
}
func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5512,7 +5540,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
func (m *SecurityContext) Reset() { *m = SecurityContext{} }
func (*SecurityContext) ProtoMessage() {}
func (*SecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{195}
+ return fileDescriptor_6c07b07c062484ab, []int{196}
}
func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5540,7 +5568,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
func (m *SerializedReference) Reset() { *m = SerializedReference{} }
func (*SerializedReference) ProtoMessage() {}
func (*SerializedReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{196}
+ return fileDescriptor_6c07b07c062484ab, []int{197}
}
func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5568,7 +5596,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
func (m *Service) Reset() { *m = Service{} }
func (*Service) ProtoMessage() {}
func (*Service) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{197}
+ return fileDescriptor_6c07b07c062484ab, []int{198}
}
func (m *Service) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5596,7 +5624,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
func (*ServiceAccount) ProtoMessage() {}
func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{198}
+ return fileDescriptor_6c07b07c062484ab, []int{199}
}
func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5624,7 +5652,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
func (*ServiceAccountList) ProtoMessage() {}
func (*ServiceAccountList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{199}
+ return fileDescriptor_6c07b07c062484ab, []int{200}
}
func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5652,7 +5680,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
func (*ServiceAccountTokenProjection) ProtoMessage() {}
func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{200}
+ return fileDescriptor_6c07b07c062484ab, []int{201}
}
func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5680,7 +5708,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
func (m *ServiceList) Reset() { *m = ServiceList{} }
func (*ServiceList) ProtoMessage() {}
func (*ServiceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{201}
+ return fileDescriptor_6c07b07c062484ab, []int{202}
}
func (m *ServiceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5708,7 +5736,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
func (m *ServicePort) Reset() { *m = ServicePort{} }
func (*ServicePort) ProtoMessage() {}
func (*ServicePort) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{202}
+ return fileDescriptor_6c07b07c062484ab, []int{203}
}
func (m *ServicePort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5736,7 +5764,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
func (*ServiceProxyOptions) ProtoMessage() {}
func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{203}
+ return fileDescriptor_6c07b07c062484ab, []int{204}
}
func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5764,7 +5792,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
func (*ServiceSpec) ProtoMessage() {}
func (*ServiceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{204}
+ return fileDescriptor_6c07b07c062484ab, []int{205}
}
func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5792,7 +5820,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
func (*ServiceStatus) ProtoMessage() {}
func (*ServiceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{205}
+ return fileDescriptor_6c07b07c062484ab, []int{206}
}
func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5820,7 +5848,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
func (*SessionAffinityConfig) ProtoMessage() {}
func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{206}
+ return fileDescriptor_6c07b07c062484ab, []int{207}
}
func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5848,7 +5876,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
func (m *SleepAction) Reset() { *m = SleepAction{} }
func (*SleepAction) ProtoMessage() {}
func (*SleepAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{207}
+ return fileDescriptor_6c07b07c062484ab, []int{208}
}
func (m *SleepAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5876,7 +5904,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo
func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{208}
+ return fileDescriptor_6c07b07c062484ab, []int{209}
}
func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5904,7 +5932,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
func (*StorageOSVolumeSource) ProtoMessage() {}
func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{209}
+ return fileDescriptor_6c07b07c062484ab, []int{210}
}
func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5932,7 +5960,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
func (m *Sysctl) Reset() { *m = Sysctl{} }
func (*Sysctl) ProtoMessage() {}
func (*Sysctl) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{210}
+ return fileDescriptor_6c07b07c062484ab, []int{211}
}
func (m *Sysctl) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5960,7 +5988,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
func (*TCPSocketAction) ProtoMessage() {}
func (*TCPSocketAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{211}
+ return fileDescriptor_6c07b07c062484ab, []int{212}
}
func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5988,7 +6016,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
func (m *Taint) Reset() { *m = Taint{} }
func (*Taint) ProtoMessage() {}
func (*Taint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{212}
+ return fileDescriptor_6c07b07c062484ab, []int{213}
}
func (m *Taint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6016,7 +6044,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
func (m *Toleration) Reset() { *m = Toleration{} }
func (*Toleration) ProtoMessage() {}
func (*Toleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{213}
+ return fileDescriptor_6c07b07c062484ab, []int{214}
}
func (m *Toleration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6044,7 +6072,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
func (*TopologySelectorLabelRequirement) ProtoMessage() {}
func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{214}
+ return fileDescriptor_6c07b07c062484ab, []int{215}
}
func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6072,7 +6100,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
func (*TopologySelectorTerm) ProtoMessage() {}
func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{215}
+ return fileDescriptor_6c07b07c062484ab, []int{216}
}
func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6100,7 +6128,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
func (*TopologySpreadConstraint) ProtoMessage() {}
func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{216}
+ return fileDescriptor_6c07b07c062484ab, []int{217}
}
func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6128,7 +6156,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
func (*TypedLocalObjectReference) ProtoMessage() {}
func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{217}
+ return fileDescriptor_6c07b07c062484ab, []int{218}
}
func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6156,7 +6184,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
func (*TypedObjectReference) ProtoMessage() {}
func (*TypedObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{218}
+ return fileDescriptor_6c07b07c062484ab, []int{219}
}
func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6184,7 +6212,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
func (m *Volume) Reset() { *m = Volume{} }
func (*Volume) ProtoMessage() {}
func (*Volume) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{219}
+ return fileDescriptor_6c07b07c062484ab, []int{220}
}
func (m *Volume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6212,7 +6240,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
func (*VolumeDevice) ProtoMessage() {}
func (*VolumeDevice) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{220}
+ return fileDescriptor_6c07b07c062484ab, []int{221}
}
func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6240,7 +6268,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
func (m *VolumeMount) Reset() { *m = VolumeMount{} }
func (*VolumeMount) ProtoMessage() {}
func (*VolumeMount) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{221}
+ return fileDescriptor_6c07b07c062484ab, []int{222}
}
func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6268,7 +6296,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} }
func (*VolumeMountStatus) ProtoMessage() {}
func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{222}
+ return fileDescriptor_6c07b07c062484ab, []int{223}
}
func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6296,7 +6324,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
func (*VolumeNodeAffinity) ProtoMessage() {}
func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{223}
+ return fileDescriptor_6c07b07c062484ab, []int{224}
}
func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6324,7 +6352,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
func (*VolumeProjection) ProtoMessage() {}
func (*VolumeProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{224}
+ return fileDescriptor_6c07b07c062484ab, []int{225}
}
func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6352,7 +6380,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} }
func (*VolumeResourceRequirements) ProtoMessage() {}
func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{225}
+ return fileDescriptor_6c07b07c062484ab, []int{226}
}
func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6380,7 +6408,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
func (m *VolumeSource) Reset() { *m = VolumeSource{} }
func (*VolumeSource) ProtoMessage() {}
func (*VolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{226}
+ return fileDescriptor_6c07b07c062484ab, []int{227}
}
func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6408,7 +6436,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{227}
+ return fileDescriptor_6c07b07c062484ab, []int{228}
}
func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6436,7 +6464,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
func (*WeightedPodAffinityTerm) ProtoMessage() {}
func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{228}
+ return fileDescriptor_6c07b07c062484ab, []int{229}
}
func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6464,7 +6492,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
func (*WindowsSecurityContextOptions) ProtoMessage() {}
func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{229}
+ return fileDescriptor_6c07b07c062484ab, []int{230}
}
func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6617,6 +6645,7 @@ func init() {
proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus")
proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry")
proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry")
+ proto.RegisterType((*NodeSwapStatus)(nil), "k8s.io.api.core.v1.NodeSwapStatus")
proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo")
proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector")
proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference")
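
The one semantic addition in this init() hunk is the RegisterType call for the new NodeSwapStatus message. As a minimal sketch of what that registration enables — illustrative only, assuming the updated vendored k8s.io/api — the type becomes resolvable by its fully qualified proto name through gogo's registry:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	corev1 "k8s.io/api/core/v1"
)

// Referencing the package forces its init() to run, which executes the
// RegisterType calls above.
var _ corev1.NodeSwapStatus

func main() {
	// Before this vendored update the lookup returns nil; afterwards it
	// yields the registered Go type for the message.
	t := proto.MessageType("k8s.io.api.core.v1.NodeSwapStatus")
	fmt.Println(t) // *v1.NodeSwapStatus
}
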
@@ -6758,1015 +6787,1020 @@ func init() {
}
var fileDescriptor_6c07b07c062484ab = []byte{
- // 16114 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x64, 0xd9,
- 0x59, 0x28, 0xa6, 0x9b, 0x59, 0xeb, 0x57, 0xfb, 0xa9, 0x5e, 0xaa, 0x6b, 0xba, 0x3b, 0x7b, 0xee,
- 0xcc, 0xf4, 0xf4, 0x6c, 0xd5, 0xea, 0x59, 0x34, 0xad, 0x99, 0xd1, 0x30, 0xb5, 0x76, 0xd7, 0x74,
- 0x57, 0x75, 0xce, 0xc9, 0xaa, 0x6e, 0x69, 0x34, 0x12, 0xba, 0x9d, 0x79, 0xaa, 0xea, 0xaa, 0x32,
- 0xef, 0xcd, 0xb9, 0xf7, 0x66, 0x75, 0x57, 0x5b, 0x04, 0x20, 0x8c, 0x40, 0x02, 0x47, 0x28, 0x08,
- 0x6c, 0x1c, 0x82, 0xe0, 0x07, 0x60, 0x16, 0xcb, 0x60, 0x64, 0x61, 0xc0, 0x88, 0xcd, 0x36, 0x8e,
- 0x00, 0xff, 0xc0, 0x98, 0x08, 0x4b, 0x84, 0x09, 0x17, 0x56, 0xe1, 0x08, 0x82, 0x1f, 0x06, 0x82,
- 0xf7, 0x7e, 0xbc, 0x57, 0xc1, 0x7b, 0xbc, 0x38, 0xeb, 0x3d, 0xe7, 0x2e, 0x99, 0x59, 0x3d, 0xdd,
- 0xa5, 0x91, 0x62, 0xfe, 0x65, 0x9e, 0xef, 0x3b, 0xdf, 0x39, 0xf7, 0xac, 0xdf, 0xf9, 0x56, 0xb0,
- 0xb7, 0x2f, 0x87, 0x33, 0xae, 0x7f, 0xd1, 0x69, 0xba, 0x17, 0xab, 0x7e, 0x40, 0x2e, 0xee, 0x5c,
- 0xba, 0xb8, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0xcd, 0x34, 0x03, 0x3f, 0xf2, 0x11, 0xe2, 0x38,
- 0x33, 0x4e, 0xd3, 0x9d, 0xa1, 0x38, 0x33, 0x3b, 0x97, 0xa6, 0x9f, 0xdb, 0x74, 0xa3, 0xad, 0xd6,
- 0xed, 0x99, 0xaa, 0xdf, 0xb8, 0xb8, 0xe9, 0x6f, 0xfa, 0x17, 0x19, 0xea, 0xed, 0xd6, 0x06, 0xfb,
- 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xf4, 0x8b, 0x71, 0x33, 0x0d, 0xa7, 0xba, 0xe5, 0x7a, 0x24,
- 0xd8, 0xbd, 0xd8, 0xdc, 0xde, 0x64, 0xed, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x95, 0x24, 0x1b, 0x6e,
- 0x5b, 0x2b, 0xbc, 0xd8, 0x20, 0x91, 0x93, 0xd1, 0xdd, 0xe9, 0x8b, 0x79, 0xb5, 0x82, 0x96, 0x17,
- 0xb9, 0x8d, 0x74, 0x33, 0x1f, 0xe9, 0x54, 0x21, 0xac, 0x6e, 0x91, 0x86, 0x93, 0xaa, 0xf7, 0x42,
- 0x5e, 0xbd, 0x56, 0xe4, 0xd6, 0x2f, 0xba, 0x5e, 0x14, 0x46, 0x41, 0xb2, 0x92, 0xfd, 0x2d, 0x0b,
- 0xce, 0xcd, 0xde, 0xaa, 0x2c, 0xd6, 0x9d, 0x30, 0x72, 0xab, 0x73, 0x75, 0xbf, 0xba, 0x5d, 0x89,
- 0xfc, 0x80, 0xdc, 0xf4, 0xeb, 0xad, 0x06, 0xa9, 0xb0, 0x81, 0x40, 0xcf, 0xc2, 0xc0, 0x0e, 0xfb,
- 0xbf, 0xbc, 0x30, 0x65, 0x9d, 0xb3, 0x2e, 0x0c, 0xce, 0x8d, 0xff, 0xe9, 0x5e, 0xe9, 0x43, 0xfb,
- 0x7b, 0xa5, 0x81, 0x9b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x1e, 0xfa, 0x36, 0xc2, 0xb5, 0xdd, 0x26,
- 0x99, 0x2a, 0x30, 0xdc, 0x51, 0x81, 0xdb, 0xb7, 0x54, 0xa1, 0xa5, 0x58, 0x40, 0xd1, 0x45, 0x18,
- 0x6c, 0x3a, 0x41, 0xe4, 0x46, 0xae, 0xef, 0x4d, 0x15, 0xcf, 0x59, 0x17, 0x7a, 0xe7, 0x26, 0x04,
- 0xea, 0x60, 0x59, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x76, 0xc3, 0xab, 0xef, 0x4e,
- 0xf5, 0x9c, 0xb3, 0x2e, 0x0c, 0xc4, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x2b, 0x05, 0x18,
- 0x98, 0xdd, 0xd8, 0x70, 0x3d, 0x37, 0xda, 0x45, 0x37, 0x61, 0xd8, 0xf3, 0x6b, 0x44, 0xfe, 0x67,
- 0x5f, 0x31, 0xf4, 0xfc, 0xb9, 0x99, 0xf4, 0x52, 0x9a, 0x59, 0xd5, 0xf0, 0xe6, 0xc6, 0xf7, 0xf7,
- 0x4a, 0xc3, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0x43, 0x4d, 0xbf, 0xa6, 0xc8, 0x16, 0x18, 0xd9,
- 0x52, 0x16, 0xd9, 0x72, 0x8c, 0x36, 0x37, 0xb6, 0xbf, 0x57, 0x1a, 0xd2, 0x0a, 0xb0, 0x4e, 0x04,
- 0xdd, 0x86, 0x31, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x96, 0x47, 0x57, 0x43,
- 0x9d, 0x9b, 0xdc, 0xdf, 0x2b, 0x8d, 0x25, 0x0a, 0x71, 0x92, 0xa0, 0xfd, 0x93, 0x16, 0x8c, 0xcd,
- 0x36, 0x9b, 0xb3, 0x41, 0xc3, 0x0f, 0xca, 0x81, 0xbf, 0xe1, 0xd6, 0x09, 0x7a, 0x19, 0x7a, 0x22,
- 0x3a, 0x6b, 0x7c, 0x86, 0x1f, 0x13, 0x43, 0xdb, 0x43, 0xe7, 0xea, 0x60, 0xaf, 0x34, 0x99, 0x40,
- 0x67, 0x53, 0xc9, 0x2a, 0xa0, 0x37, 0x60, 0xbc, 0xee, 0x57, 0x9d, 0xfa, 0x96, 0x1f, 0x46, 0x02,
- 0x2a, 0xa6, 0xfe, 0xd8, 0xfe, 0x5e, 0x69, 0xfc, 0x7a, 0x02, 0x86, 0x53, 0xd8, 0xf6, 0x3d, 0x18,
- 0x9d, 0x8d, 0x22, 0xa7, 0xba, 0x45, 0x6a, 0x7c, 0x41, 0xa1, 0x17, 0xa1, 0xc7, 0x73, 0x1a, 0xb2,
- 0x33, 0xe7, 0x64, 0x67, 0x56, 0x9d, 0x06, 0xed, 0xcc, 0xf8, 0xba, 0xe7, 0xbe, 0xdb, 0x12, 0x8b,
- 0x94, 0x96, 0x61, 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x91, 0x1d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x25,
- 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x70, 0x76, 0xc7, 0x77,
- 0x6b, 0x65, 0xbf, 0x16, 0xa2, 0x6d, 0x18, 0x6b, 0x06, 0x64, 0x83, 0x04, 0xaa, 0x68, 0xca, 0x3a,
- 0x57, 0xbc, 0x30, 0xf4, 0xfc, 0x85, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x9d, 0x3b,
- 0x29, 0xda, 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0x27, 0x05, 0x38, 0x3e, 0x7b, 0xaf, 0x15,
- 0x90, 0x05, 0x37, 0xdc, 0x4e, 0x6e, 0xb8, 0x9a, 0x1b, 0x6e, 0xaf, 0xc6, 0x23, 0xa0, 0x56, 0xfa,
- 0x82, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x07, 0xfd, 0xf4, 0xf7, 0x3a, 0x5e, 0x16, 0x9f, 0x3c, 0x29,
- 0x90, 0x87, 0x16, 0x9c, 0xc8, 0x59, 0xe0, 0x20, 0x2c, 0x71, 0xd0, 0x0a, 0x0c, 0x55, 0xd9, 0xf9,
- 0xb0, 0xb9, 0xe2, 0xd7, 0x08, 0x5b, 0x5b, 0x83, 0x73, 0xcf, 0x50, 0xf4, 0xf9, 0xb8, 0xf8, 0x60,
- 0xaf, 0x34, 0xc5, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0xee, 0x3d, 0x8c,
- 0x12, 0x64, 0x6c, 0xf5, 0x0b, 0xda, 0xce, 0xed, 0x65, 0x3b, 0x77, 0x38, 0x7b, 0xd7, 0xa2, 0x4b,
- 0xd0, 0xb3, 0xed, 0x7a, 0xb5, 0xa9, 0x3e, 0x46, 0xeb, 0x0c, 0x9d, 0xf3, 0x6b, 0xae, 0x57, 0x3b,
- 0xd8, 0x2b, 0x4d, 0x18, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0xff, 0xc6, 0x82, 0x12, 0x83, 0x2d,
- 0xb9, 0x75, 0x52, 0x26, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0x9f, 0x07, 0x08, 0x49,
- 0x35, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x7c, 0x0a, 0xb7,
- 0x9c, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x7c, 0xaa, 0x48, 0x00, 0x8e, 0x71, 0x8c, 0xf3, 0xa9,
- 0xd8, 0xe9, 0x7c, 0x42, 0x1f, 0x83, 0xb1, 0xb8, 0xb1, 0xb0, 0xe9, 0x54, 0xe5, 0x00, 0xb2, 0x1d,
- 0x5c, 0x31, 0x41, 0x38, 0x89, 0x6b, 0xff, 0xb7, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0xdf, 0xe7, 0xdf,
- 0x6a, 0xff, 0xae, 0x05, 0xfd, 0x73, 0xae, 0x57, 0x73, 0xbd, 0x4d, 0xf4, 0x19, 0x18, 0xa0, 0x57,
- 0x65, 0xcd, 0x89, 0x1c, 0x71, 0x0c, 0x7f, 0x58, 0xdb, 0x5b, 0xea, 0xe6, 0x9a, 0x69, 0x6e, 0x6f,
- 0xd2, 0x82, 0x70, 0x86, 0x62, 0xd3, 0xdd, 0x76, 0xe3, 0xf6, 0x67, 0x49, 0x35, 0x5a, 0x21, 0x91,
- 0x13, 0x7f, 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0xfa, 0x22, 0x27, 0xd8, 0x24, 0x91, 0x38,
- 0x8f, 0x33, 0xcf, 0x4d, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x2a, 0x89, 0x6f, 0xa9, 0x35, 0x56,
- 0x15, 0x0b, 0x12, 0xf6, 0x7f, 0xe8, 0x87, 0x53, 0xf3, 0x95, 0xe5, 0x9c, 0x75, 0x75, 0x1e, 0xfa,
- 0x6a, 0x81, 0xbb, 0x43, 0x02, 0x31, 0xce, 0x8a, 0xca, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0x2e, 0xc3,
- 0x30, 0xbf, 0x1f, 0xaf, 0x3a, 0x5e, 0x2d, 0x3e, 0x1e, 0x05, 0xf6, 0xf0, 0x4d, 0x0d, 0x86, 0x0d,
- 0xcc, 0x43, 0x2e, 0xaa, 0xf3, 0x89, 0xcd, 0x98, 0x77, 0xf7, 0x7e, 0xd1, 0x82, 0x71, 0xde, 0xcc,
- 0x6c, 0x14, 0x05, 0xee, 0xed, 0x56, 0x44, 0xc2, 0xa9, 0x5e, 0x76, 0xd2, 0xcd, 0x67, 0x8d, 0x56,
- 0xee, 0x08, 0xcc, 0xdc, 0x4c, 0x50, 0xe1, 0x87, 0xe0, 0x94, 0x68, 0x77, 0x3c, 0x09, 0xc6, 0xa9,
- 0x66, 0xd1, 0x8f, 0x58, 0x30, 0x5d, 0xf5, 0xbd, 0x28, 0xf0, 0xeb, 0x75, 0x12, 0x94, 0x5b, 0xb7,
- 0xeb, 0x6e, 0xb8, 0xc5, 0xd7, 0x29, 0x26, 0x1b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, 0xe6,
- 0xf0, 0xec, 0xfe, 0x5e, 0x69, 0x7a, 0x3e, 0x97, 0x14, 0x6e, 0xd3, 0x0c, 0xda, 0x06, 0x44, 0x6f,
- 0xf6, 0x4a, 0xe4, 0x6c, 0x92, 0xb8, 0xf1, 0xfe, 0xee, 0x1b, 0x3f, 0xb1, 0xbf, 0x57, 0x42, 0xab,
- 0x29, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x17, 0x8e, 0xd1, 0xd2, 0xd4, 0xb7, 0x0e, 0x74, 0xdf, 0xdc,
- 0xd4, 0xfe, 0x5e, 0xe9, 0xd8, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0xfd, 0x90, 0x05, 0xa7, 0xe2,
- 0xcf, 0x5f, 0xbc, 0xdb, 0x74, 0xbc, 0x5a, 0xdc, 0xf0, 0x60, 0xf7, 0x0d, 0xd3, 0x33, 0xf9, 0xd4,
- 0x7c, 0x1e, 0x25, 0x9c, 0xdf, 0x08, 0xf2, 0x60, 0x92, 0x76, 0x2d, 0xd9, 0x36, 0x74, 0xdf, 0xf6,
- 0xc9, 0xfd, 0xbd, 0xd2, 0xe4, 0x6a, 0x9a, 0x06, 0xce, 0x22, 0x3c, 0x3d, 0x0f, 0xc7, 0x33, 0x57,
- 0x27, 0x1a, 0x87, 0xe2, 0x36, 0xe1, 0x4c, 0xe0, 0x20, 0xa6, 0x3f, 0xd1, 0x31, 0xe8, 0xdd, 0x71,
- 0xea, 0x2d, 0xb1, 0x31, 0x31, 0xff, 0xf3, 0x4a, 0xe1, 0xb2, 0x65, 0xff, 0x6f, 0x45, 0x18, 0x9b,
- 0xaf, 0x2c, 0xdf, 0xd7, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9,
- 0x97, 0xe8, 0x0f, 0x66, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9a, 0xb3, 0x65, 0x1f, 0xf0, 0x46,
- 0xdd, 0xc9, 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x43,
- 0x2e, 0xdd, 0x07, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x6d, 0xb7, 0xee, 0x46, 0x2e,
- 0x09, 0xd1, 0x93, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdf, 0xdf, 0x2b, 0x15,
- 0x67, 0x6b, 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x8b, 0x29, 0x06, 0x7a, 0x1a, 0x7a, 0x6a, 0x81, 0xdf,
- 0x9c, 0x2a, 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x1f,
- 0x17, 0xe0, 0xf4, 0x3c, 0x69, 0x6e, 0x2d, 0x55, 0x72, 0xee, 0x8b, 0x0b, 0x30, 0xd0, 0xf0, 0x3d,
- 0x37, 0xf2, 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x41, 0x4f,
- 0x33, 0x66, 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62,
- 0xc5, 0x28, 0x8c, 0xf5, 0x90, 0x04, 0x98, 0x41, 0x62, 0x4e, 0x80, 0xf2, 0x08, 0xe2, 0x46, 0x48,
- 0x70, 0x02, 0x14, 0x82, 0x35, 0x2c, 0x54, 0x86, 0xc1, 0x30, 0x31, 0xb3, 0x5d, 0x6d, 0xcd, 0x11,
- 0xc6, 0x2a, 0xa8, 0x99, 0x8c, 0x89, 0x18, 0x37, 0x58, 0x5f, 0x47, 0x56, 0xe1, 0x1b, 0x05, 0x40,
- 0x7c, 0x08, 0xbf, 0xcb, 0x06, 0x6e, 0x3d, 0x3d, 0x70, 0xdd, 0x6f, 0x89, 0x07, 0x35, 0x7a, 0xff,
- 0xd6, 0x82, 0xd3, 0xf3, 0xae, 0x57, 0x23, 0x41, 0xce, 0x02, 0x7c, 0x38, 0x4f, 0xf9, 0xc3, 0x31,
- 0x29, 0xc6, 0x12, 0xeb, 0x79, 0x00, 0x4b, 0xcc, 0xfe, 0x47, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd,
- 0xc7, 0xae, 0xa7, 0x3f, 0xf6, 0x01, 0x2c, 0x0b, 0xfb, 0x3a, 0x8c, 0xce, 0xd7, 0x5d, 0xe2, 0x45,
- 0xcb, 0xe5, 0x79, 0xdf, 0xdb, 0x70, 0x37, 0xd1, 0x2b, 0x30, 0x1a, 0xb9, 0x0d, 0xe2, 0xb7, 0xa2,
- 0x0a, 0xa9, 0xfa, 0x1e, 0x7b, 0xb9, 0x5a, 0x17, 0x7a, 0xe7, 0xd0, 0xfe, 0x5e, 0x69, 0x74, 0xcd,
- 0x80, 0xe0, 0x04, 0xa6, 0xfd, 0xcb, 0xf4, 0xdc, 0xaa, 0xb7, 0xc2, 0x88, 0x04, 0x6b, 0x41, 0x2b,
- 0x8c, 0xe6, 0x5a, 0x94, 0xf7, 0x2c, 0x07, 0x3e, 0xed, 0x8e, 0xeb, 0x7b, 0xe8, 0xb4, 0xf1, 0x1c,
- 0x1f, 0x90, 0x4f, 0x71, 0xf1, 0xec, 0x9e, 0x01, 0x08, 0xdd, 0x4d, 0x8f, 0x04, 0xda, 0xf3, 0x61,
- 0x94, 0x6d, 0x15, 0x55, 0x8a, 0x35, 0x0c, 0x54, 0x87, 0x91, 0xba, 0x73, 0x9b, 0xd4, 0x2b, 0xa4,
- 0x4e, 0xaa, 0x91, 0x1f, 0x08, 0xf9, 0xc6, 0x0b, 0xdd, 0xbd, 0x03, 0xae, 0xeb, 0x55, 0xe7, 0x26,
- 0xf6, 0xf7, 0x4a, 0x23, 0x46, 0x11, 0x36, 0x89, 0xd3, 0xa3, 0xc3, 0x6f, 0xd2, 0xaf, 0x70, 0xea,
- 0xfa, 0xe3, 0xf3, 0x86, 0x28, 0xc3, 0x0a, 0xaa, 0x8e, 0x8e, 0x9e, 0xbc, 0xa3, 0xc3, 0xfe, 0x6b,
- 0xba, 0xd0, 0xfc, 0x46, 0xd3, 0xf7, 0x88, 0x17, 0xcd, 0xfb, 0x5e, 0x8d, 0x4b, 0xa6, 0x5e, 0x31,
- 0x44, 0x27, 0xe7, 0x13, 0xa2, 0x93, 0x13, 0xe9, 0x1a, 0x9a, 0xf4, 0xe4, 0xa3, 0xd0, 0x17, 0x46,
- 0x4e, 0xd4, 0x0a, 0xc5, 0xc0, 0x3d, 0x2a, 0x97, 0x5d, 0x85, 0x95, 0x1e, 0xec, 0x95, 0xc6, 0x54,
- 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0xf4, 0x37, 0x48, 0x18, 0x3a, 0x9b, 0x92, 0x6d, 0x18,
- 0x13, 0x75, 0xfb, 0x57, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x18, 0xf4, 0x92, 0x20, 0xf0, 0x03, 0xf1,
- 0x6d, 0x23, 0x02, 0xb1, 0x77, 0x91, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x0f, 0x0b, 0xc6, 0x54, 0x5f,
- 0x79, 0x5b, 0x47, 0xf0, 0x5c, 0x7b, 0x1b, 0xa0, 0x2a, 0x3f, 0x30, 0x64, 0xd7, 0xec, 0xd0, 0xf3,
- 0xe7, 0x33, 0x39, 0x9a, 0xd4, 0x30, 0xc6, 0x94, 0x55, 0x51, 0x88, 0x35, 0x6a, 0xf6, 0x1f, 0x58,
- 0x30, 0x99, 0xf8, 0xa2, 0xeb, 0x6e, 0x18, 0xa1, 0x77, 0x52, 0x5f, 0x35, 0xd3, 0xe5, 0xe2, 0x73,
- 0x43, 0xfe, 0x4d, 0x6a, 0xcf, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0x42, 0xaf, 0x1b, 0x91, 0x86, 0xfc,
- 0x98, 0xc7, 0xda, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0xcb, 0xb4, 0x26, 0xe6, 0x04, 0xec, 0x3f,
- 0x2e, 0xc2, 0x20, 0xdf, 0xdf, 0x2b, 0x4e, 0xf3, 0x08, 0xe6, 0xe2, 0x19, 0x18, 0x74, 0x1b, 0x8d,
- 0x56, 0xe4, 0xdc, 0x16, 0xf7, 0xde, 0x00, 0x3f, 0x83, 0x96, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x19,
- 0x7a, 0x58, 0x57, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xef, 0x33, 0x0b, 0x4e, 0xe4, 0x70,
- 0x96, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x76, 0x3d, 0x27, 0xd8, 0xa5,
- 0x65, 0x53, 0x45, 0x46, 0xf0, 0xb9, 0xf6, 0x04, 0xe7, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x06,
- 0x60, 0x8d, 0xe8, 0xf4, 0xcb, 0x30, 0xa8, 0x90, 0x0f, 0xc3, 0x39, 0x4e, 0x7f, 0x0c, 0xc6, 0x12,
- 0x6d, 0x75, 0xaa, 0x3e, 0xac, 0x33, 0x9e, 0xbf, 0xc7, 0x8e, 0x0c, 0xd1, 0xeb, 0x45, 0x6f, 0x47,
- 0xdc, 0x4d, 0xf7, 0xe0, 0x58, 0x3d, 0xe3, 0xc8, 0x17, 0xf3, 0xda, 0xfd, 0x15, 0x71, 0x5a, 0x7c,
- 0xf6, 0xb1, 0x2c, 0x28, 0xce, 0x6c, 0xc3, 0x38, 0x11, 0x0b, 0xed, 0x4e, 0x44, 0x7a, 0xde, 0x1d,
- 0x53, 0x9d, 0xbf, 0x46, 0x76, 0xd5, 0xa1, 0xfa, 0x9d, 0xec, 0xfe, 0x19, 0x3e, 0xfa, 0xfc, 0xb8,
- 0x1c, 0x12, 0x04, 0x8a, 0xd7, 0xc8, 0x2e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x6d, 0xbf, 0xee, 0x6b,
- 0x16, 0x8c, 0xa8, 0xaf, 0x3b, 0x82, 0x73, 0x61, 0xce, 0x3c, 0x17, 0xce, 0xb4, 0x5d, 0xe0, 0x39,
- 0x27, 0xc2, 0x37, 0x0a, 0x70, 0x4a, 0xe1, 0xd0, 0x47, 0x14, 0xff, 0x23, 0x56, 0xd5, 0x45, 0x18,
- 0xf4, 0x94, 0x38, 0xd1, 0x32, 0xe5, 0x78, 0xb1, 0x30, 0x31, 0xc6, 0xa1, 0x57, 0x9e, 0x17, 0x5f,
- 0xda, 0xc3, 0xba, 0x9c, 0x5d, 0x5c, 0xee, 0x73, 0x50, 0x6c, 0xb9, 0x35, 0x71, 0xc1, 0x7c, 0x58,
- 0x8e, 0xf6, 0xfa, 0xf2, 0xc2, 0xc1, 0x5e, 0xe9, 0xd1, 0x3c, 0x95, 0x13, 0xbd, 0xd9, 0xc2, 0x99,
- 0xf5, 0xe5, 0x05, 0x4c, 0x2b, 0xa3, 0x59, 0x18, 0x93, 0x5a, 0xb5, 0x9b, 0x94, 0x2f, 0xf5, 0x3d,
- 0x71, 0x0f, 0x29, 0x61, 0x39, 0x36, 0xc1, 0x38, 0x89, 0x8f, 0x16, 0x60, 0x7c, 0xbb, 0x75, 0x9b,
- 0xd4, 0x49, 0xc4, 0x3f, 0xf8, 0x1a, 0xe1, 0xa2, 0xe4, 0xc1, 0xf8, 0x09, 0x7b, 0x2d, 0x01, 0xc7,
- 0xa9, 0x1a, 0xf6, 0xbf, 0xb2, 0xfb, 0x40, 0x8c, 0x9e, 0xc6, 0xdf, 0x7c, 0x27, 0x97, 0x73, 0x37,
- 0xab, 0xe2, 0x1a, 0xd9, 0x5d, 0xf3, 0x29, 0x1f, 0x92, 0xbd, 0x2a, 0x8c, 0x35, 0xdf, 0xd3, 0x76,
- 0xcd, 0xff, 0x56, 0x01, 0x8e, 0xab, 0x11, 0x30, 0xb8, 0xe5, 0xef, 0xf6, 0x31, 0xb8, 0x04, 0x43,
- 0x35, 0xb2, 0xe1, 0xb4, 0xea, 0x91, 0xd2, 0x6b, 0xf4, 0x72, 0x55, 0xdb, 0x42, 0x5c, 0x8c, 0x75,
- 0x9c, 0x43, 0x0c, 0xdb, 0xaf, 0x8f, 0xb0, 0x8b, 0x38, 0x72, 0xe8, 0x1a, 0x57, 0xbb, 0xc6, 0xca,
- 0xdd, 0x35, 0x8f, 0x41, 0xaf, 0xdb, 0xa0, 0x8c, 0x59, 0xc1, 0xe4, 0xb7, 0x96, 0x69, 0x21, 0xe6,
- 0x30, 0xf4, 0x04, 0xf4, 0x57, 0xfd, 0x46, 0xc3, 0xf1, 0x6a, 0xec, 0xca, 0x1b, 0x9c, 0x1b, 0xa2,
- 0xbc, 0xdb, 0x3c, 0x2f, 0xc2, 0x12, 0x46, 0x99, 0x6f, 0x27, 0xd8, 0xe4, 0xc2, 0x1e, 0xc1, 0x7c,
- 0xcf, 0x06, 0x9b, 0x21, 0x66, 0xa5, 0xf4, 0xad, 0x7a, 0xc7, 0x0f, 0xb6, 0x5d, 0x6f, 0x73, 0xc1,
- 0x0d, 0xc4, 0x96, 0x50, 0x77, 0xe1, 0x2d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x82, 0xde, 0xa6, 0x1f,
- 0x44, 0xe1, 0x54, 0x1f, 0x1b, 0xee, 0x47, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0x65, 0x3f, 0x88, 0xe2,
- 0x0f, 0xa0, 0xff, 0x42, 0xcc, 0xab, 0xa3, 0xeb, 0xd0, 0x4f, 0xbc, 0x9d, 0xa5, 0xc0, 0x6f, 0x4c,
- 0x4d, 0xe6, 0x53, 0x5a, 0xe4, 0x28, 0x7c, 0x99, 0xc5, 0x3c, 0xaa, 0x28, 0xc6, 0x92, 0x04, 0xfa,
- 0x28, 0x14, 0x89, 0xb7, 0x33, 0xd5, 0xcf, 0x28, 0x4d, 0xe7, 0x50, 0xba, 0xe9, 0x04, 0xf1, 0x99,
- 0xbf, 0xe8, 0xed, 0x60, 0x5a, 0x07, 0x7d, 0x02, 0x06, 0xe5, 0x81, 0x11, 0x0a, 0x29, 0x6a, 0xe6,
- 0x82, 0x95, 0xc7, 0x0c, 0x26, 0xef, 0xb6, 0xdc, 0x80, 0x34, 0x88, 0x17, 0x85, 0xf1, 0x09, 0x29,
- 0xa1, 0x21, 0x8e, 0xa9, 0xa1, 0x2a, 0x0c, 0x07, 0x24, 0x74, 0xef, 0x91, 0xb2, 0x5f, 0x77, 0xab,
- 0xbb, 0x53, 0x27, 0x59, 0xf7, 0x9e, 0x6a, 0x3b, 0x64, 0x58, 0xab, 0x10, 0x4b, 0xf9, 0xf5, 0x52,
- 0x6c, 0x10, 0x45, 0x6f, 0xc1, 0x48, 0x40, 0xc2, 0xc8, 0x09, 0x22, 0xd1, 0xca, 0x94, 0xd2, 0xca,
- 0x8d, 0x60, 0x1d, 0xc0, 0x9f, 0x13, 0x71, 0x33, 0x31, 0x04, 0x9b, 0x14, 0xd0, 0x27, 0xa4, 0xca,
- 0x61, 0xc5, 0x6f, 0x79, 0x51, 0x38, 0x35, 0xc8, 0xfa, 0x9d, 0xa9, 0x9b, 0xbe, 0x19, 0xe3, 0x25,
- 0x75, 0x12, 0xbc, 0x32, 0x36, 0x48, 0xa1, 0x4f, 0xc1, 0x08, 0xff, 0xcf, 0x55, 0xaa, 0xe1, 0xd4,
- 0x71, 0x46, 0xfb, 0x5c, 0x3e, 0x6d, 0x8e, 0x38, 0x77, 0x5c, 0x10, 0x1f, 0xd1, 0x4b, 0x43, 0x6c,
- 0x52, 0x43, 0x18, 0x46, 0xea, 0xee, 0x0e, 0xf1, 0x48, 0x18, 0x96, 0x03, 0xff, 0x36, 0x11, 0x12,
- 0xe2, 0x53, 0xd9, 0x2a, 0x58, 0xff, 0x36, 0x11, 0x8f, 0x40, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x75,
- 0x18, 0xa5, 0x4f, 0x72, 0x37, 0x26, 0x3a, 0xd4, 0x89, 0x28, 0x7b, 0x38, 0x63, 0xa3, 0x12, 0x4e,
- 0x10, 0x41, 0x37, 0x60, 0x98, 0x8d, 0x79, 0xab, 0xc9, 0x89, 0x9e, 0xe8, 0x44, 0x94, 0x19, 0x14,
- 0x54, 0xb4, 0x2a, 0xd8, 0x20, 0x80, 0xde, 0x84, 0xc1, 0xba, 0xbb, 0x41, 0xaa, 0xbb, 0xd5, 0x3a,
- 0x99, 0x1a, 0x66, 0xd4, 0x32, 0x0f, 0xc3, 0xeb, 0x12, 0x89, 0xf3, 0xe7, 0xea, 0x2f, 0x8e, 0xab,
- 0xa3, 0x9b, 0x70, 0x22, 0x22, 0x41, 0xc3, 0xf5, 0x1c, 0x7a, 0x88, 0x89, 0x27, 0x21, 0xd3, 0x8c,
- 0x8f, 0xb0, 0xd5, 0x75, 0x56, 0xcc, 0xc6, 0x89, 0xb5, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x85,
- 0xa9, 0x0c, 0x08, 0x5f, 0xb7, 0xc7, 0x18, 0xe5, 0xd7, 0x04, 0xe5, 0xa9, 0xb5, 0x1c, 0xbc, 0x83,
- 0x36, 0x30, 0x9c, 0x4b, 0x1d, 0xdd, 0x80, 0x31, 0x76, 0x72, 0x96, 0x5b, 0xf5, 0xba, 0x68, 0x70,
- 0x94, 0x35, 0xf8, 0x84, 0xe4, 0x23, 0x96, 0x4d, 0xf0, 0xc1, 0x5e, 0x09, 0xe2, 0x7f, 0x38, 0x59,
- 0x1b, 0xdd, 0x66, 0x4a, 0xd8, 0x56, 0xe0, 0x46, 0xbb, 0x74, 0x57, 0x91, 0xbb, 0xd1, 0xd4, 0x58,
- 0x5b, 0x81, 0x94, 0x8e, 0xaa, 0x34, 0xb5, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x57, 0x41, 0x18, 0xd5,
- 0x5c, 0x6f, 0x6a, 0x9c, 0xbf, 0xa7, 0xe4, 0x49, 0x5a, 0xa1, 0x85, 0x98, 0xc3, 0x98, 0x02, 0x96,
- 0xfe, 0xb8, 0x41, 0x6f, 0xdc, 0x09, 0x86, 0x18, 0x2b, 0x60, 0x25, 0x00, 0xc7, 0x38, 0x94, 0x09,
- 0x8e, 0xa2, 0xdd, 0x29, 0xc4, 0x50, 0xd5, 0x81, 0xb8, 0xb6, 0xf6, 0x09, 0x4c, 0xcb, 0xed, 0xdb,
- 0x30, 0xaa, 0x8e, 0x09, 0x36, 0x26, 0xa8, 0x04, 0xbd, 0x8c, 0xed, 0x13, 0xe2, 0xd3, 0x41, 0xda,
- 0x05, 0xc6, 0x12, 0x62, 0x5e, 0xce, 0xba, 0xe0, 0xde, 0x23, 0x73, 0xbb, 0x11, 0xe1, 0xb2, 0x88,
- 0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x31, 0x8e, 0xfd, 0x1f, 0x39, 0xfb, 0x1c, 0xdf, 0x12, 0x5d, 0xdc,
- 0x8b, 0xcf, 0xc2, 0x00, 0x33, 0xfc, 0xf0, 0x03, 0xae, 0x9d, 0xed, 0x8d, 0x19, 0xe6, 0xab, 0xa2,
- 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0x54, 0xf5, 0x06, 0xc4, 0xa5, 0xae, 0x8e, 0x11, 0xa3, 0x75,
- 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xc0, 0x6c, 0x9c, 0xaa, 0x7e, 0x5d, 0x70, 0x9b, 0x92, 0x33, 0x19,
- 0x28, 0x8b, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x3c, 0xf4, 0xd1, 0x2e, 0x2c, 0x97, 0xc5,
- 0x75, 0xaa, 0x24, 0x81, 0x57, 0x59, 0x29, 0x16, 0x50, 0xfb, 0x0f, 0x2c, 0xc6, 0x4b, 0xa5, 0xcf,
- 0x7c, 0x74, 0x95, 0x5d, 0x1a, 0xec, 0x06, 0xd1, 0xb4, 0xf0, 0x8f, 0x6b, 0x37, 0x81, 0x82, 0x1d,
- 0x24, 0xfe, 0x63, 0xa3, 0x26, 0x7a, 0x3b, 0x79, 0x33, 0x70, 0x86, 0xe2, 0x45, 0x39, 0x04, 0xc9,
- 0xdb, 0xe1, 0x91, 0xf8, 0x8a, 0xa3, 0xfd, 0x69, 0x77, 0x45, 0xd8, 0x3f, 0x55, 0xd0, 0x56, 0x49,
- 0x25, 0x72, 0x22, 0x82, 0xca, 0xd0, 0x7f, 0xc7, 0x71, 0x23, 0xd7, 0xdb, 0x14, 0x7c, 0x5f, 0xfb,
- 0x8b, 0x8e, 0x55, 0xba, 0xc5, 0x2b, 0x70, 0xee, 0x45, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0xb4,
- 0x3c, 0x8f, 0x52, 0x2c, 0x74, 0x4b, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4,
- 0x0e, 0x80, 0x3c, 0x21, 0x48, 0x4d, 0xc8, 0x0e, 0x9f, 0xed, 0x4c, 0x74, 0x4d, 0xd5, 0xe1, 0xc2,
- 0xc9, 0xf8, 0x3f, 0xd6, 0xe8, 0xd9, 0x91, 0x36, 0xa7, 0x7a, 0x67, 0xd0, 0x27, 0xe9, 0x16, 0x75,
- 0x82, 0x88, 0xd4, 0x66, 0x23, 0x31, 0x38, 0x4f, 0x77, 0xf7, 0x38, 0x5c, 0x73, 0x1b, 0x44, 0xdf,
- 0xce, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x53, 0x84, 0xa9, 0xbc, 0xee, 0xd2, 0x4d, 0x43, 0xee,
- 0xba, 0xd1, 0x3c, 0x65, 0x6b, 0x2d, 0x73, 0xd3, 0x2c, 0x8a, 0x72, 0xac, 0x30, 0xe8, 0xea, 0x0d,
- 0xdd, 0x4d, 0xf9, 0xb6, 0xef, 0x8d, 0x57, 0x6f, 0x85, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x01, 0x71,
- 0x42, 0x61, 0x7c, 0xa7, 0xad, 0x72, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x52, 0xc6, 0x9e, 0x0e, 0x52,
- 0x46, 0x63, 0x88, 0x7a, 0x1f, 0xec, 0x10, 0xa1, 0x4f, 0x03, 0x6c, 0xb8, 0x9e, 0x1b, 0x6e, 0x31,
- 0xea, 0x7d, 0x87, 0xa6, 0xae, 0x98, 0xe2, 0x25, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x4b, 0x30, 0xa4,
- 0x0e, 0x90, 0xe5, 0x05, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x01, 0xeb, 0x78, 0xf6,
- 0x67, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x6e, 0xc7, 0xb7, 0xd0, 0x7e, 0x7c, 0xed,
- 0x9f, 0x19, 0x84, 0x31, 0xa3, 0xb1, 0x56, 0xd8, 0xc5, 0x99, 0x7b, 0x85, 0x5e, 0x40, 0x4e, 0x44,
- 0xc4, 0xfe, 0xb3, 0x3b, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xc1,
- 0xba, 0x13, 0x32, 0x89, 0x25, 0x11, 0xfb, 0xae, 0x1b, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed,
- 0xd6, 0xe7, 0xb4, 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c,
- 0xd8, 0x2e, 0xe6, 0x30, 0x74, 0x99, 0x1d, 0xad, 0x74, 0x55, 0xcc, 0x53, 0x6e, 0x94, 0x2d, 0xb3,
- 0x5e, 0x83, 0xc9, 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0xbe, 0x36, 0x6f, 0xb2, 0xa7, 0xa0,
- 0x9f, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0xc6, 0x32, 0x2f, 0xc6, 0x12, 0x9e, 0x5c, 0x30, 0x03, 0xdd,
- 0x2d, 0x18, 0xfa, 0xea, 0x13, 0x8b, 0x9a, 0x99, 0x5d, 0x0c, 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c,
- 0x61, 0xe8, 0x57, 0x2c, 0x40, 0x4e, 0x9d, 0xbe, 0x96, 0x69, 0xb1, 0x7a, 0xdc, 0x00, 0x63, 0xb5,
- 0x5f, 0xed, 0x38, 0xec, 0xad, 0x70, 0x66, 0x36, 0x55, 0x9b, 0x4b, 0x4a, 0x5f, 0x11, 0x5d, 0x44,
- 0x69, 0x04, 0xfd, 0x32, 0xba, 0xee, 0x86, 0xd1, 0xe7, 0xff, 0x26, 0x71, 0x39, 0x65, 0x74, 0x09,
- 0xad, 0xeb, 0x8f, 0xaf, 0xa1, 0x43, 0x3e, 0xbe, 0x46, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0,
- 0x0c, 0xb3, 0x2f, 0x7f, 0xa2, 0xc3, 0x03, 0x46, 0x88, 0xd3, 0xbb, 0x79, 0xc6, 0x94, 0x85, 0x1e,
- 0x78, 0x84, 0x75, 0xb9, 0xfd, 0x23, 0x78, 0x3d, 0x24, 0xc1, 0xdc, 0x29, 0xa9, 0x26, 0x3e, 0xd0,
- 0x79, 0x0f, 0x4d, 0x6f, 0xfc, 0x43, 0x16, 0x4c, 0xa5, 0x07, 0x88, 0x77, 0x69, 0x6a, 0x94, 0xf5,
- 0xdf, 0x6e, 0x37, 0x32, 0xa2, 0xf3, 0xd2, 0xdc, 0x75, 0x6a, 0x36, 0x87, 0x16, 0xce, 0x6d, 0x65,
- 0xba, 0x05, 0x27, 0x73, 0xe6, 0x3d, 0x43, 0x6a, 0xbd, 0xa0, 0x4b, 0xad, 0x3b, 0xc8, 0x3a, 0x67,
- 0xe4, 0xcc, 0xcc, 0xbc, 0xd5, 0x72, 0xbc, 0xc8, 0x8d, 0x76, 0x75, 0x29, 0xb7, 0x07, 0xe6, 0x80,
- 0xa0, 0x4f, 0x41, 0x6f, 0xdd, 0xf5, 0x5a, 0x77, 0xc5, 0x4d, 0x79, 0x3e, 0xfb, 0x11, 0xe3, 0xb5,
- 0xee, 0x9a, 0x43, 0x5c, 0xa2, 0x1b, 0x92, 0x95, 0x1f, 0xec, 0x95, 0x50, 0x1a, 0x01, 0x73, 0xaa,
- 0xf6, 0xd3, 0x30, 0xba, 0xe0, 0x90, 0x86, 0xef, 0x2d, 0x7a, 0xb5, 0xa6, 0xef, 0x7a, 0x11, 0x9a,
- 0x82, 0x1e, 0xc6, 0x22, 0xf2, 0x0b, 0xb2, 0x87, 0x0e, 0x21, 0x66, 0x25, 0xf6, 0x26, 0x1c, 0x5f,
- 0xf0, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0x36, 0x5b, 0x5e, 0xd6, 0xa4, 0x7e, 0xab, 0x52, 0xea, 0x64,
- 0xe5, 0xbf, 0xe9, 0xb5, 0x9a, 0x7c, 0x29, 0x2d, 0xb9, 0x75, 0x92, 0x23, 0x9b, 0xfd, 0x99, 0x82,
- 0xd1, 0x52, 0x8c, 0xaf, 0x34, 0x8b, 0x56, 0xae, 0x51, 0xc2, 0x5b, 0x30, 0xb0, 0xe1, 0x92, 0x7a,
- 0x0d, 0x93, 0x0d, 0x31, 0x1b, 0x4f, 0xe6, 0x9b, 0x2d, 0x2e, 0x51, 0x4c, 0xa5, 0x02, 0x65, 0x32,
- 0xab, 0x25, 0x51, 0x19, 0x2b, 0x32, 0x68, 0x1b, 0xc6, 0xe5, 0x9c, 0x49, 0xa8, 0x38, 0xb5, 0x9f,
- 0x6a, 0xb7, 0x08, 0x4d, 0xe2, 0xcc, 0x84, 0x1b, 0x27, 0xc8, 0xe0, 0x14, 0x61, 0x74, 0x1a, 0x7a,
- 0x1a, 0x94, 0x3f, 0xe9, 0x61, 0xc3, 0xcf, 0x84, 0x54, 0x4c, 0xde, 0xc6, 0x4a, 0xed, 0x9f, 0xb3,
- 0xe0, 0x64, 0x6a, 0x64, 0x84, 0xdc, 0xf1, 0x01, 0xcf, 0x42, 0x52, 0x0e, 0x58, 0xe8, 0x2c, 0x07,
- 0xb4, 0xff, 0x3b, 0x0b, 0x8e, 0x2d, 0x36, 0x9a, 0xd1, 0xee, 0x82, 0x6b, 0x5a, 0x10, 0xbc, 0x0c,
- 0x7d, 0x0d, 0x52, 0x73, 0x5b, 0x0d, 0x31, 0x73, 0x25, 0x79, 0x87, 0xaf, 0xb0, 0x52, 0x7a, 0x0e,
- 0x54, 0x22, 0x3f, 0x70, 0x36, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x71, 0x42, 0xee, 0x3d, 0x72, 0xdd,
- 0x6d, 0xb8, 0xd1, 0xfd, 0xed, 0x2e, 0xa1, 0xfc, 0x97, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0x96, 0x05,
- 0x63, 0x72, 0xdd, 0xcf, 0xd6, 0x6a, 0x01, 0x09, 0x43, 0x34, 0x0d, 0x05, 0xb7, 0x29, 0x7a, 0x09,
- 0xa2, 0x97, 0x85, 0xe5, 0x32, 0x2e, 0xb8, 0x4d, 0xf9, 0xe8, 0x62, 0x6c, 0x42, 0xd1, 0xb4, 0x83,
- 0xb8, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0x17, 0x60, 0xc0, 0xf3, 0x6b, 0xfc, 0xdd, 0x22, 0x34, 0xe1,
- 0x14, 0x73, 0x55, 0x94, 0x61, 0x05, 0x45, 0x65, 0x18, 0xe4, 0x56, 0xb2, 0xf1, 0xa2, 0xed, 0xca,
- 0xd6, 0x96, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x63, 0x22, 0xf6, 0x1f, 0x59, 0x30, 0x2c, 0xbf, 0xac,
- 0xcb, 0x17, 0x25, 0xdd, 0x5a, 0xf1, 0x6b, 0x32, 0xde, 0x5a, 0xf4, 0x45, 0xc8, 0x20, 0xc6, 0x43,
- 0xb0, 0x78, 0xa8, 0x87, 0xe0, 0x25, 0x18, 0x72, 0x9a, 0xcd, 0xb2, 0xf9, 0x8a, 0x64, 0x4b, 0x69,
- 0x36, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xb3, 0x05, 0x18, 0x95, 0x5f, 0x50, 0x69, 0xdd, 0x0e, 0x49,
- 0x84, 0xd6, 0x60, 0xd0, 0xe1, 0xb3, 0x44, 0xe4, 0x22, 0x7f, 0x2c, 0x5b, 0xba, 0x69, 0x4c, 0x69,
- 0xcc, 0x0e, 0xcf, 0xca, 0xda, 0x38, 0x26, 0x84, 0xea, 0x30, 0xe1, 0xf9, 0x11, 0x63, 0x8d, 0x14,
- 0xbc, 0x9d, 0xc2, 0x39, 0x49, 0xfd, 0x94, 0xa0, 0x3e, 0xb1, 0x9a, 0xa4, 0x82, 0xd3, 0x84, 0xd1,
- 0xa2, 0x94, 0x18, 0x17, 0xf3, 0x45, 0x7d, 0xfa, 0xc4, 0x65, 0x0b, 0x8c, 0xed, 0xdf, 0xb7, 0x60,
- 0x50, 0xa2, 0x1d, 0x85, 0x6d, 0xc1, 0x0a, 0xf4, 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xed, 0x3a,
- 0xce, 0xe7, 0x2b, 0xe6, 0xf8, 0xf8, 0xff, 0x10, 0x4b, 0x1a, 0x4c, 0x61, 0xa8, 0xba, 0xff, 0x3e,
- 0x51, 0x18, 0xaa, 0xfe, 0xe4, 0x5c, 0x4a, 0x7f, 0xc7, 0xfa, 0xac, 0x49, 0xe0, 0xe9, 0xc3, 0xa4,
- 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0xf2, 0x61, 0x52, 0x66, 0xa5, 0x58, 0x40, 0xd1, 0x3b, 0x30, 0x5c,
- 0x95, 0x9a, 0xa2, 0x78, 0x87, 0x9f, 0x6f, 0xab, 0xb5, 0x54, 0x0a, 0x6e, 0x2e, 0xe9, 0x9c, 0xd7,
- 0xea, 0x63, 0x83, 0x9a, 0x69, 0x05, 0x56, 0xec, 0x64, 0x05, 0x16, 0xd3, 0xcd, 0xb7, 0x89, 0xfa,
- 0x79, 0x0b, 0xfa, 0xb8, 0x86, 0xa0, 0x3b, 0x05, 0x8d, 0xa6, 0xef, 0x8f, 0xc7, 0xee, 0x26, 0x2d,
- 0x14, 0x9c, 0x0d, 0x5a, 0x81, 0x41, 0xf6, 0x83, 0x69, 0x38, 0x8a, 0xf9, 0x3e, 0x63, 0xbc, 0x55,
- 0xbd, 0x83, 0x37, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0x7f, 0xba, 0x48, 0x4f, 0xb7, 0x18, 0xd5, 0xb8,
- 0xf4, 0xad, 0x87, 0x77, 0xe9, 0x17, 0x1e, 0xd6, 0xa5, 0xbf, 0x09, 0x63, 0x55, 0xcd, 0x3a, 0x20,
- 0x9e, 0xc9, 0x0b, 0x6d, 0x17, 0x89, 0x66, 0x48, 0xc0, 0x65, 0xa8, 0xf3, 0x26, 0x11, 0x9c, 0xa4,
- 0x8a, 0x3e, 0x09, 0xc3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x90, 0xee, 0x89, 0xfc, 0xf5, 0xa2, 0x37,
- 0xc1, 0x65, 0xee, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0xff, 0xc9, 0x02, 0xb4, 0xd8, 0xdc, 0x22, 0x0d,
- 0x12, 0x38, 0xf5, 0x58, 0xc9, 0xf7, 0x25, 0x0b, 0xa6, 0x48, 0xaa, 0x78, 0xde, 0x6f, 0x34, 0xc4,
- 0x93, 0x3e, 0x47, 0xea, 0xb4, 0x98, 0x53, 0x27, 0x66, 0xeb, 0xf3, 0x30, 0x70, 0x6e, 0x7b, 0x68,
- 0x05, 0x26, 0xf9, 0x2d, 0xa9, 0x00, 0x9a, 0xad, 0xdd, 0x23, 0x82, 0xf0, 0xe4, 0x5a, 0x1a, 0x05,
- 0x67, 0xd5, 0xb3, 0x7f, 0x7f, 0x04, 0x72, 0x7b, 0xf1, 0x81, 0x76, 0xf3, 0x03, 0xed, 0xe6, 0x07,
- 0xda, 0xcd, 0x0f, 0xb4, 0x9b, 0x1f, 0x68, 0x37, 0x3f, 0xd0, 0x6e, 0xbe, 0x4f, 0xb5, 0x9b, 0xff,
- 0xa5, 0x05, 0xc7, 0xd5, 0xf5, 0x65, 0x3c, 0xd8, 0x3f, 0x07, 0x93, 0x7c, 0xbb, 0xcd, 0xd7, 0x1d,
- 0xb7, 0xb1, 0x46, 0x1a, 0xcd, 0xba, 0x13, 0x49, 0x1b, 0xa6, 0x4b, 0x99, 0x2b, 0x37, 0xe1, 0x28,
- 0x61, 0x54, 0xe4, 0x1e, 0x67, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0x3b, 0x03, 0xd0, 0xbb, 0xb8,
- 0x43, 0xbc, 0xe8, 0x08, 0x9e, 0x36, 0x55, 0x18, 0x75, 0xbd, 0x1d, 0xbf, 0xbe, 0x43, 0x6a, 0x1c,
- 0x7e, 0x98, 0x17, 0xf8, 0x09, 0x41, 0x7a, 0x74, 0xd9, 0x20, 0x81, 0x13, 0x24, 0x1f, 0x86, 0x8e,
- 0xe8, 0x0a, 0xf4, 0xf1, 0xcb, 0x47, 0x28, 0x88, 0x32, 0xcf, 0x6c, 0x36, 0x88, 0xe2, 0x4a, 0x8d,
- 0xf5, 0x57, 0xfc, 0x72, 0x13, 0xd5, 0xd1, 0x67, 0x61, 0x74, 0xc3, 0x0d, 0xc2, 0x68, 0xcd, 0x6d,
- 0xd0, 0xab, 0xa1, 0xd1, 0xbc, 0x0f, 0x9d, 0x90, 0x1a, 0x87, 0x25, 0x83, 0x12, 0x4e, 0x50, 0x46,
- 0x9b, 0x30, 0x52, 0x77, 0xf4, 0xa6, 0xfa, 0x0f, 0xdd, 0x94, 0xba, 0x1d, 0xae, 0xeb, 0x84, 0xb0,
- 0x49, 0x97, 0x6e, 0xa7, 0x2a, 0x53, 0x6b, 0x0c, 0x30, 0x71, 0x86, 0xda, 0x4e, 0x5c, 0x9f, 0xc1,
- 0x61, 0x94, 0x41, 0x63, 0xee, 0x06, 0x83, 0x26, 0x83, 0xa6, 0x39, 0x15, 0x7c, 0x06, 0x06, 0x09,
- 0x1d, 0x42, 0x4a, 0x58, 0x5c, 0x30, 0x17, 0xbb, 0xeb, 0xeb, 0x8a, 0x5b, 0x0d, 0x7c, 0x53, 0x1b,
- 0xb7, 0x28, 0x29, 0xe1, 0x98, 0x28, 0x9a, 0x87, 0xbe, 0x90, 0x04, 0xae, 0x92, 0xf8, 0xb7, 0x99,
- 0x46, 0x86, 0xc6, 0x5d, 0x1a, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0x30, 0x51, 0x2c, 0xbb,
- 0x0c, 0xb4, 0xe5, 0x35, 0xcb, 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0xa1, 0x3f, 0x20, 0x75, 0xa6, 0xee,
- 0x1d, 0xe9, 0x7e, 0x91, 0x73, 0xed, 0x31, 0xaf, 0x87, 0x25, 0x01, 0x74, 0x0d, 0x50, 0x40, 0x28,
- 0x83, 0xe7, 0x7a, 0x9b, 0xca, 0x08, 0x5f, 0x1c, 0xb4, 0x8a, 0x91, 0xc6, 0x31, 0x86, 0xf4, 0x66,
- 0xc5, 0x19, 0xd5, 0xd0, 0x15, 0x98, 0x50, 0xa5, 0xcb, 0x5e, 0x18, 0x39, 0xf4, 0x80, 0x1b, 0x63,
- 0xb4, 0x94, 0x7c, 0x05, 0x27, 0x11, 0x70, 0xba, 0x8e, 0xfd, 0x6b, 0x16, 0xf0, 0x71, 0x3e, 0x02,
- 0xa9, 0xc2, 0xeb, 0xa6, 0x54, 0xe1, 0x54, 0xee, 0xcc, 0xe5, 0x48, 0x14, 0x7e, 0xcd, 0x82, 0x21,
- 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xb5, 0x59, 0xb3, 0x2d, 0x18, 0xa7, 0x2b, 0xfd, 0xc6, 0xed, 0x90,
- 0x04, 0x3b, 0xa4, 0xc6, 0x16, 0x66, 0xe1, 0xfe, 0x16, 0xa6, 0x32, 0xf8, 0xbd, 0x9e, 0x20, 0x88,
- 0x53, 0x4d, 0xd8, 0x9f, 0x91, 0x5d, 0x55, 0xf6, 0xd1, 0x55, 0x35, 0xe7, 0x09, 0xfb, 0x68, 0x35,
- 0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xcb, 0x0f, 0xa3, 0xa4, 0x7d, 0xf4, 0x55, 0x3f, 0x8c, 0x30,
- 0x83, 0xd8, 0x2f, 0x00, 0x2c, 0xde, 0x25, 0x55, 0xbe, 0x62, 0xf5, 0x47, 0x8f, 0x95, 0xff, 0xe8,
- 0xb1, 0xff, 0xd2, 0x82, 0xd1, 0xa5, 0x79, 0xe3, 0xe6, 0x9a, 0x01, 0xe0, 0x2f, 0xb5, 0x5b, 0xb7,
- 0x56, 0xa5, 0x91, 0x0e, 0xb7, 0x53, 0x50, 0xa5, 0x58, 0xc3, 0x40, 0xa7, 0xa0, 0x58, 0x6f, 0x79,
- 0x42, 0xec, 0xd9, 0x4f, 0xaf, 0xc7, 0xeb, 0x2d, 0x0f, 0xd3, 0x32, 0xcd, 0x93, 0xad, 0xd8, 0xb5,
- 0x27, 0x5b, 0xc7, 0x80, 0x3a, 0xa8, 0x04, 0xbd, 0x77, 0xee, 0xb8, 0x35, 0x1e, 0x27, 0x40, 0x18,
- 0x10, 0xdd, 0xba, 0xb5, 0xbc, 0x10, 0x62, 0x5e, 0x6e, 0x7f, 0xb9, 0x08, 0xd3, 0x4b, 0x75, 0x72,
- 0xf7, 0x3d, 0xc6, 0x4a, 0xe8, 0xd6, 0x0f, 0xef, 0x70, 0x02, 0xa4, 0xc3, 0xfa, 0x5a, 0x76, 0x1e,
- 0x8f, 0x0d, 0xe8, 0xe7, 0xe6, 0xc1, 0x32, 0x72, 0x42, 0xa6, 0x52, 0x36, 0x7f, 0x40, 0x66, 0xb8,
- 0x99, 0xb1, 0x50, 0xca, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xfa, 0x15, 0x18, 0xd6, 0x31,
- 0x0f, 0xe5, 0xf5, 0xfc, 0xc3, 0x45, 0x18, 0xa7, 0x3d, 0x78, 0xa8, 0x13, 0xb1, 0x9e, 0x9e, 0x88,
- 0x07, 0xed, 0xf9, 0xda, 0x79, 0x36, 0xde, 0x49, 0xce, 0xc6, 0xa5, 0xbc, 0xd9, 0x38, 0xea, 0x39,
- 0xf8, 0x11, 0x0b, 0x26, 0x97, 0xea, 0x7e, 0x75, 0x3b, 0xe1, 0x9d, 0xfa, 0x12, 0x0c, 0xd1, 0xe3,
- 0x38, 0x34, 0x02, 0xb5, 0x18, 0xa1, 0x7b, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xf5, 0xf5, 0xe5,
- 0x85, 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xcf, 0x2d, 0x38, 0x73, 0x65, 0x7e, 0x31,
- 0x5e, 0x8a, 0xa9, 0xa0, 0x43, 0xe7, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58,
- 0x2f, 0x04, 0xf4, 0xfd, 0x12, 0xdf, 0x6b, 0x1d, 0xe0, 0x0a, 0x2e, 0xcf, 0x8b, 0x73, 0x57, 0x6a,
- 0x81, 0xac, 0x5c, 0x2d, 0xd0, 0x13, 0xd0, 0x4f, 0xef, 0x05, 0xb7, 0x2a, 0xfb, 0xcd, 0xcd, 0x2e,
- 0x78, 0x11, 0x96, 0x30, 0xfb, 0x57, 0x2d, 0x98, 0xbc, 0xe2, 0x46, 0xf4, 0xd2, 0x4e, 0x46, 0xd5,
- 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x9b, 0x8c, 0xaa, 0x83, 0x15, 0x04, 0x6b, 0x58, 0xfc,
- 0x83, 0x76, 0x5c, 0xe6, 0xef, 0x52, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63, 0x85, 0x41, 0xc7, 0xab,
- 0xe6, 0x06, 0x4c, 0x64, 0xb9, 0x2b, 0x0e, 0x6e, 0x35, 0x5e, 0x0b, 0x12, 0x80, 0x63, 0x1c, 0xfb,
- 0x1f, 0x2c, 0x28, 0x5d, 0xe1, 0x5e, 0xbb, 0x1b, 0x61, 0xce, 0xa1, 0xfb, 0x02, 0x0c, 0x12, 0xa9,
- 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xe0, 0x3e, 0x0a, 0xaf, 0x0b, 0x17, 0xfa,
- 0xc3, 0xf9, 0x40, 0x2f, 0x01, 0x22, 0x7a, 0x5b, 0x7a, 0xb4, 0x23, 0x16, 0x36, 0x65, 0x31, 0x05,
- 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb3, 0xe0, 0xb8, 0xfa, 0xe0, 0xf7, 0xdd, 0x67, 0xda, 0x5f, 0x2f,
- 0xc0, 0xc8, 0xd5, 0xb5, 0xb5, 0xf2, 0x15, 0x12, 0x69, 0xab, 0xb2, 0xbd, 0xda, 0x1f, 0x6b, 0xda,
- 0xcb, 0x76, 0x6f, 0xc4, 0x56, 0xe4, 0xd6, 0x67, 0x78, 0x0c, 0xbf, 0x99, 0x65, 0x2f, 0xba, 0x11,
- 0x54, 0xa2, 0xc0, 0xf5, 0x36, 0x33, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79, 0x3c, 0x0b, 0x7a, 0x01,
- 0xfa, 0x58, 0x10, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0x3d, 0xd8, 0x2b, 0x0d, 0xae,
- 0xe3, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0xb4, 0x0e, 0x43, 0x5b, 0x51, 0xd4, 0xbc, 0x4a, 0x9c, 0x1a,
- 0x09, 0xe4, 0x29, 0x7b, 0x36, 0xeb, 0x94, 0xa5, 0x83, 0xc0, 0xd1, 0xe2, 0x83, 0x29, 0x2e, 0x0b,
- 0xb1, 0x4e, 0xc7, 0xae, 0x00, 0xc4, 0xb0, 0x07, 0xa4, 0xb8, 0xb1, 0xd7, 0x60, 0x90, 0x7e, 0xee,
- 0x6c, 0xdd, 0x75, 0xda, 0xab, 0xc6, 0x9f, 0x81, 0x41, 0xa9, 0xf8, 0x0e, 0x45, 0x88, 0x0f, 0x76,
- 0x23, 0x49, 0xbd, 0x78, 0x88, 0x63, 0xb8, 0xfd, 0x38, 0x08, 0x0b, 0xe0, 0x76, 0x24, 0xed, 0x0d,
- 0x38, 0xc6, 0x4c, 0x99, 0x9d, 0x68, 0xcb, 0x58, 0xa3, 0x9d, 0x17, 0xc3, 0xb3, 0xe2, 0x5d, 0xc7,
- 0xbf, 0x6c, 0x4a, 0x73, 0x21, 0x1f, 0x96, 0x14, 0xe3, 0x37, 0x9e, 0xfd, 0xf7, 0x3d, 0xf0, 0xc8,
- 0x72, 0x25, 0x3f, 0x36, 0xd5, 0x65, 0x18, 0xe6, 0xec, 0x22, 0x5d, 0x1a, 0x4e, 0x5d, 0xb4, 0xab,
- 0x24, 0xa0, 0x6b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0c, 0x14, 0xdd, 0x77, 0xbd, 0xa4, 0x83, 0xe5,
- 0xf2, 0x5b, 0xab, 0x98, 0x96, 0x53, 0x30, 0xe5, 0x3c, 0xf9, 0x91, 0xae, 0xc0, 0x8a, 0xfb, 0x7c,
- 0x1d, 0x46, 0xdd, 0xb0, 0x1a, 0xba, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e, 0x57, 0x32, 0x07, 0xda,
- 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xed, 0x9a, 0x7b, 0xed, 0x18, 0x19, 0x83, 0x1e,
- 0xff, 0x4d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x3e,
- 0xe8, 0xaa, 0x5b, 0x4e, 0x73, 0xb6, 0x15, 0x6d, 0x2d, 0xb8, 0x61, 0xd5, 0xdf, 0x21, 0xc1, 0x2e,
- 0x7b, 0x8b, 0x0f, 0xc4, 0x0f, 0x3a, 0x05, 0x98, 0xbf, 0x3a, 0x5b, 0xa6, 0x98, 0x38, 0x5d, 0x07,
- 0xcd, 0xc2, 0x98, 0x2c, 0xac, 0x90, 0x90, 0x5d, 0x01, 0x43, 0x8c, 0x8c, 0x72, 0x79, 0x14, 0xc5,
- 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3, 0x88, 0xeb, 0xb9, 0x91,
- 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x2f, 0xeb, 0x00, 0x6c, 0xe2, 0xd9,
- 0xff, 0x5f, 0x0f, 0x4c, 0xb0, 0x69, 0xfb, 0x60, 0x85, 0x7d, 0x2f, 0xad, 0xb0, 0xf5, 0xf4, 0x0a,
- 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc1, 0x64, 0xdc, 0xc6, 0x32, 0xbb,
- 0x08, 0x83, 0x81, 0xe1, 0x8d, 0x3a, 0xa8, 0x2b, 0xb5, 0xa4, 0x63, 0x69, 0x8c, 0x83, 0xde, 0x00,
- 0x68, 0xc6, 0x32, 0xf4, 0x82, 0x11, 0x42, 0x14, 0x72, 0xc5, 0xe7, 0x5a, 0x1d, 0xfb, 0xb3, 0x30,
- 0xa8, 0xdc, 0x4d, 0xa5, 0xbf, 0xb9, 0x95, 0xe3, 0x6f, 0xde, 0x99, 0x8d, 0x90, 0xb6, 0x71, 0xc5,
- 0x4c, 0xdb, 0xb8, 0xaf, 0x5a, 0x10, 0x6b, 0x38, 0xd0, 0x5b, 0x30, 0xd8, 0xf4, 0x99, 0x41, 0x74,
- 0x20, 0xbd, 0x0c, 0x1e, 0x6f, 0xab, 0x22, 0xe1, 0x71, 0x02, 0x03, 0x3e, 0x1d, 0x65, 0x59, 0x15,
- 0xc7, 0x54, 0xd0, 0x35, 0xe8, 0x6f, 0x06, 0xa4, 0x12, 0xb1, 0x20, 0x56, 0xdd, 0x13, 0xe4, 0xcb,
- 0x97, 0x57, 0xc4, 0x92, 0x82, 0xfd, 0x1b, 0x05, 0x18, 0x4f, 0xa2, 0xa2, 0xd7, 0xa0, 0x87, 0xdc,
- 0x25, 0x55, 0xd1, 0xdf, 0x4c, 0x9e, 0x20, 0x96, 0x91, 0xf0, 0x01, 0xa0, 0xff, 0x31, 0xab, 0x85,
- 0xae, 0x42, 0x3f, 0x65, 0x08, 0xae, 0xa8, 0x80, 0x8d, 0x8f, 0xe6, 0x31, 0x15, 0x8a, 0xb3, 0xe2,
- 0x9d, 0x13, 0x45, 0x58, 0x56, 0x67, 0x06, 0x69, 0xd5, 0x66, 0x85, 0xbe, 0xb5, 0xa2, 0x76, 0x22,
- 0x81, 0xb5, 0xf9, 0x32, 0x47, 0x12, 0xd4, 0xb8, 0x41, 0x9a, 0x2c, 0xc4, 0x31, 0x11, 0xf4, 0x06,
- 0xf4, 0x86, 0x75, 0x42, 0x9a, 0xc2, 0xe2, 0x20, 0x53, 0xca, 0x59, 0xa1, 0x08, 0x82, 0x12, 0x93,
- 0x8a, 0xb0, 0x02, 0xcc, 0x2b, 0xda, 0xbf, 0x65, 0x01, 0x70, 0x0b, 0x3e, 0xc7, 0xdb, 0x24, 0x47,
- 0xa0, 0x18, 0x58, 0x80, 0x9e, 0xb0, 0x49, 0xaa, 0xed, 0xac, 0xfd, 0xe3, 0xfe, 0x54, 0x9a, 0xa4,
- 0x1a, 0xaf, 0x59, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x8f, 0x02, 0x8c, 0xc6, 0x68, 0xcb, 0x11, 0x69,
- 0xa0, 0xe7, 0x8c, 0x28, 0x37, 0xa7, 0x12, 0x51, 0x6e, 0x06, 0x19, 0xb6, 0x26, 0x83, 0xfe, 0x2c,
- 0x14, 0x1b, 0xce, 0x5d, 0x21, 0x64, 0x7c, 0xa6, 0x7d, 0x37, 0x28, 0xfd, 0x99, 0x15, 0xe7, 0x2e,
- 0x7f, 0x87, 0x3f, 0x23, 0xf7, 0xd8, 0x8a, 0x73, 0xb7, 0xa3, 0x45, 0x3a, 0x6d, 0x84, 0xb5, 0xe5,
- 0x7a, 0xc2, 0x38, 0xad, 0xab, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x75, 0xd1, 0x96, 0xeb, 0xa1,
- 0x7b, 0xd0, 0x2f, 0x6c, 0x47, 0x45, 0xf8, 0xbd, 0x8b, 0x5d, 0xb4, 0x27, 0x4c, 0x4f, 0x79, 0x9b,
- 0x17, 0xa5, 0x9c, 0x41, 0x94, 0x76, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x2b, 0x0b, 0x46, 0xc5, 0x6f,
- 0x4c, 0xde, 0x6d, 0x91, 0x30, 0x12, 0x7c, 0xf8, 0x47, 0xba, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2,
- 0x11, 0x79, 0x65, 0x9a, 0xc0, 0x8e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x86, 0x05, 0xc7, 0x1a, 0xce,
- 0x5d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x06, 0xe3, 0xb5, 0xee, 0xa6, 0x3f, 0x55,
- 0x9d, 0x77, 0x52, 0x2a, 0x5c, 0x8f, 0x65, 0xa1, 0x74, 0xec, 0x6a, 0x66, 0xbf, 0xa6, 0x37, 0x60,
- 0x40, 0xae, 0xb7, 0x87, 0x69, 0x18, 0xcf, 0xda, 0x11, 0x6b, 0xed, 0xa1, 0xb6, 0xf3, 0x59, 0x18,
- 0xd6, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, 0x5d, 0x98, 0xcc, 0x58, 0x4b, 0x0f, 0xb5, 0xc9, 0x3b, 0x70,
- 0x2a, 0x77, 0x7d, 0x3c, 0x54, 0xc7, 0x86, 0xaf, 0x5b, 0xfa, 0x39, 0x78, 0x04, 0xda, 0x99, 0x79,
- 0x53, 0x3b, 0x73, 0xb6, 0xfd, 0xce, 0xc9, 0x51, 0xd1, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8,
- 0x4d, 0xe8, 0xab, 0xd3, 0x12, 0x69, 0x81, 0x6c, 0x77, 0xde, 0x91, 0x31, 0x5f, 0xcc, 0xca, 0x43,
- 0x2c, 0x28, 0xd8, 0x5f, 0xb1, 0x20, 0xc3, 0x35, 0x83, 0xf2, 0x49, 0x2d, 0xb7, 0xc6, 0x86, 0xa4,
- 0x18, 0xf3, 0x49, 0x2a, 0x08, 0xcc, 0x19, 0x28, 0x6e, 0xba, 0x35, 0xe1, 0x59, 0xac, 0xc0, 0x57,
- 0x28, 0x78, 0xd3, 0xad, 0xa1, 0x25, 0x40, 0x61, 0xab, 0xd9, 0xac, 0x33, 0xb3, 0x25, 0xa7, 0x7e,
- 0x25, 0xf0, 0x5b, 0x4d, 0x6e, 0x6e, 0x5c, 0xe4, 0x42, 0xa2, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8,
- 0xbf, 0x6b, 0x41, 0xcf, 0x11, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x6b, 0xc3,
- 0x0c, 0x76, 0xee, 0x2c, 0xde, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0xf6, 0x2c, 0x98,
- 0xbc, 0xee, 0x3b, 0xb5, 0x39, 0xa7, 0xee, 0x78, 0x55, 0x12, 0x2c, 0x7b, 0x9b, 0x87, 0xb2, 0xed,
- 0x2f, 0x74, 0xb4, 0xed, 0xbf, 0x0c, 0x7d, 0x6e, 0x53, 0x0b, 0xfb, 0x7e, 0x8e, 0xce, 0xee, 0x72,
- 0x59, 0x44, 0x7c, 0x47, 0x46, 0xe3, 0xac, 0x14, 0x0b, 0x7c, 0xba, 0x2c, 0xb9, 0x51, 0x5d, 0x4f,
- 0xfe, 0xb2, 0xa4, 0x6f, 0x9d, 0x64, 0x38, 0x33, 0xc3, 0xfc, 0x7b, 0x0b, 0x8c, 0x26, 0x84, 0x07,
- 0x23, 0x86, 0x7e, 0x97, 0x7f, 0xa9, 0x58, 0x9b, 0x4f, 0x66, 0xbf, 0x41, 0x52, 0x03, 0xa3, 0xf9,
- 0xe6, 0xf1, 0x02, 0x2c, 0x09, 0xd9, 0x97, 0x21, 0x33, 0xfc, 0x4c, 0x67, 0xf9, 0x92, 0xfd, 0x09,
- 0x98, 0x60, 0x35, 0x0f, 0x29, 0xbb, 0xb1, 0x13, 0x52, 0xf1, 0x8c, 0x08, 0xbe, 0xf6, 0xff, 0x6d,
- 0x01, 0x5a, 0xf1, 0x6b, 0xee, 0xc6, 0xae, 0x20, 0xce, 0xbf, 0xff, 0x5d, 0x28, 0xf1, 0xc7, 0x71,
- 0x32, 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xa4, 0x68, 0xb7, 0xb4, 0xd6, 0x1e,
- 0x1d, 0x77, 0xa2, 0x87, 0xde, 0x4a, 0x04, 0x1d, 0xfc, 0x68, 0x2a, 0xe8, 0xe0, 0x93, 0x99, 0x76,
- 0x31, 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0x17, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcf,
- 0x8c, 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0x81, 0x4b, 0x62, 0xff, 0xd5, 0x82,
- 0x38, 0xdc, 0xd5, 0x11, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3,
- 0xb8, 0xd1, 0x35, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53,
- 0xb3, 0xf1, 0xcd, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0x07, 0x13, 0xa8, 0x72,
- 0x07, 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x9c, 0x0d, 0xcd,
- 0xb4, 0xf4, 0x5c, 0xbd, 0x9e, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f,
- 0xea, 0xeb, 0xe0, 0x82, 0xfd, 0x35, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4f, 0x5c, 0x40, 0x54, 0x7f,
- 0x72, 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x1f, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x3d,
- 0xa2, 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0x3d, 0xd8, 0x2b, 0x8d, 0xa8, 0x7f, 0x3c, 0x82,
- 0x6b, 0x5c, 0xc5, 0xfe, 0x25, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13,
- 0x92, 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0x1e, 0xec, 0x95, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39,
- 0x76, 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0x7f, 0xb2, 0xa0, 0x67, 0x95, 0xde,
- 0x5e, 0x0f, 0xff, 0x08, 0x78, 0xdd, 0x38, 0x02, 0x4e, 0xe7, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b,
- 0x89, 0xdd, 0x7f, 0x36, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05,
- 0xbe, 0x60, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0xfa, 0x85,
- 0x9f, 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x7c, 0x11, 0x8c, 0xfc, 0x48, 0xe8,
- 0xf7, 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x66, 0xa5, 0xba, 0x45,
- 0x6a, 0xad, 0xba, 0xeb, 0x6d, 0x2e, 0x6f, 0x7a, 0xbe, 0x2a, 0x5e, 0xbc, 0x4b, 0xaa, 0x2d, 0xa6,
- 0x1b, 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbf, 0xbf, 0x57, 0x9a, 0xc1, 0x87, 0xa2, 0x8d,
- 0x0f, 0xd9, 0x17, 0xf4, 0xe7, 0x16, 0x5c, 0xe4, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47,
- 0x59, 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xb2, 0x18, 0xd0, 0x8b, 0xe5, 0xc3, 0xb5,
- 0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x7f, 0x2e, 0xc2, 0x88, 0x08, 0x13, 0x29, 0xee, 0x80, 0x97, 0x8c,
- 0x25, 0xf1, 0x68, 0x62, 0x49, 0x4c, 0x18, 0xc8, 0x0f, 0xe6, 0xf8, 0x0f, 0x61, 0x82, 0x1e, 0xce,
- 0x57, 0x89, 0x13, 0x44, 0xb7, 0x89, 0xc3, 0xad, 0x12, 0x8b, 0x87, 0x3e, 0xfd, 0x95, 0x78, 0xfc,
- 0x7a, 0x92, 0x18, 0x4e, 0xd3, 0xff, 0x5e, 0xba, 0x73, 0x3c, 0x18, 0x4f, 0x45, 0xfa, 0x7c, 0x1b,
- 0x06, 0x95, 0x93, 0x94, 0x38, 0x74, 0xda, 0x07, 0xcc, 0x4d, 0x52, 0xe0, 0x42, 0xcf, 0xd8, 0x41,
- 0x2f, 0x26, 0x67, 0xff, 0x66, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x2a, 0x0c, 0x38, 0x21, 0x0b, 0xe2,
- 0x5d, 0x6b, 0x27, 0x97, 0x4e, 0x35, 0xc3, 0x1c, 0xd5, 0x66, 0x45, 0x4d, 0xac, 0x68, 0xa0, 0xab,
- 0xdc, 0xf6, 0x73, 0x87, 0xb4, 0x13, 0x4a, 0xa7, 0xa8, 0x81, 0xb4, 0x0e, 0xdd, 0x21, 0x58, 0xd4,
- 0x47, 0x9f, 0xe2, 0xc6, 0xb9, 0xd7, 0x3c, 0xff, 0x8e, 0x77, 0xc5, 0xf7, 0x65, 0x48, 0xa0, 0xee,
- 0x08, 0x4e, 0x48, 0x93, 0x5c, 0x55, 0x1d, 0x9b, 0xd4, 0xba, 0x0b, 0x9d, 0xfd, 0x39, 0x60, 0x79,
- 0x49, 0xcc, 0x98, 0x04, 0x21, 0x22, 0x30, 0x26, 0x62, 0x90, 0xca, 0x32, 0x31, 0x76, 0x99, 0xcf,
- 0x6f, 0xb3, 0x76, 0xac, 0xc7, 0xb9, 0x66, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x16, 0x3f, 0x84, 0x97,
- 0x88, 0x13, 0xb5, 0x02, 0x12, 0xa2, 0x8f, 0xc3, 0x54, 0xfa, 0x65, 0x2c, 0xd4, 0x21, 0x16, 0xe3,
- 0x9e, 0x4f, 0xef, 0xef, 0x95, 0xa6, 0x2a, 0x39, 0x38, 0x38, 0xb7, 0xb6, 0xfd, 0x2b, 0x16, 0x30,
- 0x4f, 0xf0, 0x23, 0xe0, 0x7c, 0x3e, 0x66, 0x72, 0x3e, 0x53, 0x79, 0xd3, 0x99, 0xc3, 0xf4, 0xbc,
- 0xc8, 0xd7, 0x70, 0x39, 0xf0, 0xef, 0xee, 0x0a, 0xdb, 0xad, 0xce, 0xcf, 0x38, 0xfb, 0xcb, 0x16,
- 0xb0, 0x24, 0x3e, 0x98, 0xbf, 0xda, 0xa5, 0x82, 0xa3, 0xb3, 0x59, 0xc2, 0xc7, 0x61, 0x60, 0x43,
- 0x0c, 0x7f, 0x86, 0xd0, 0xc9, 0xe8, 0xb0, 0x49, 0x5b, 0x4e, 0x9a, 0xf0, 0xe8, 0x14, 0xff, 0xb0,
- 0xa2, 0x66, 0xff, 0xf7, 0x16, 0x4c, 0xe7, 0x57, 0x43, 0xeb, 0x70, 0x32, 0x20, 0xd5, 0x56, 0x10,
- 0xd2, 0x2d, 0x21, 0x1e, 0x40, 0xc2, 0x29, 0x8a, 0x4f, 0xf5, 0x23, 0xfb, 0x7b, 0xa5, 0x93, 0x38,
- 0x1b, 0x05, 0xe7, 0xd5, 0x45, 0xaf, 0xc0, 0x68, 0x2b, 0xe4, 0x9c, 0x1f, 0x63, 0xba, 0x42, 0x11,
- 0x29, 0x9a, 0xf9, 0x0d, 0xad, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x80, 0x2f, 0x47, 0x15, 0x2c,
- 0xba, 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x78, 0xa7, 0x5b, 0x9f, 0x5d,
- 0x97, 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xc1, 0x82, 0x93, 0x3a, 0xa2, 0xe6,
- 0x0e, 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0xbb,
- 0x20, 0x57, 0xe8, 0x0d, 0x51, 0x7e, 0x20, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8,
- 0x86, 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4,
- 0xfe, 0x7b, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x5d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x6b, 0xf1,
- 0x6e, 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0xcf, 0x74, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9,
- 0x95, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc3, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e,
- 0x26, 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xab, 0x45, 0x7e, 0x68,
- 0xb0, 0xb7, 0xc7, 0x53, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7,
- 0xca, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0x00, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0,
- 0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x66, 0xe0, 0xef, 0xb8, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a,
- 0xe7, 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78,
- 0xf7, 0xca, 0x6e, 0x6c, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xcd, 0x42, 0x5f, 0xe4, 0x30, 0x6b, 0xb3,
- 0xde, 0x7c, 0x23, 0xfa, 0x35, 0x8a, 0xa1, 0x67, 0x96, 0xa3, 0x15, 0xb0, 0xa8, 0x88, 0xde, 0x96,
- 0xee, 0xf5, 0xfc, 0x26, 0x12, 0xde, 0x2b, 0xdd, 0xdd, 0x5a, 0x9a, 0x73, 0xbd, 0xf0, 0x8a, 0x31,
- 0x68, 0xa1, 0x57, 0x00, 0xc8, 0xdd, 0x88, 0x04, 0x9e, 0x53, 0x57, 0x36, 0xa2, 0x8a, 0x91, 0x59,
- 0xf0, 0x57, 0xfd, 0x68, 0x3d, 0x24, 0x8b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x0e, 0x01, 0xc4,
- 0x0f, 0x0d, 0x74, 0x0f, 0x06, 0xaa, 0x4e, 0xd3, 0xa9, 0xf2, 0xb4, 0xa9, 0xc5, 0x3c, 0xaf, 0xe7,
- 0xb8, 0xc6, 0xcc, 0xbc, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0x32, 0x7c, 0x40, 0x16, 0x77, 0x54, 0xd8,
- 0xa8, 0xf6, 0xd0, 0x17, 0x2c, 0x18, 0x12, 0x91, 0x8e, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a,
- 0xfb, 0xb3, 0x71, 0x0d, 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x1d, 0x7b, 0xa1, 0x37, 0x8c,
- 0x3e, 0x2c, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x90, 0x5d, 0x35, 0xfa, 0xb3, 0x76,
- 0xdd, 0x78, 0xd6, 0xf6, 0xe4, 0xfb, 0x0f, 0x1b, 0xfc, 0x76, 0xa7, 0x17, 0x2d, 0x2a, 0xeb, 0xb1,
- 0x44, 0x7a, 0xf3, 0x9d, 0x5e, 0xb5, 0x87, 0x5d, 0x87, 0x38, 0x22, 0x9f, 0x85, 0xb1, 0x9a, 0xc9,
- 0xb5, 0x88, 0x95, 0xf8, 0x64, 0x1e, 0xdd, 0x04, 0x93, 0x13, 0xf3, 0x29, 0x09, 0x00, 0x4e, 0x12,
- 0x46, 0x65, 0x1e, 0x5a, 0x66, 0xd9, 0xdb, 0xf0, 0x85, 0x07, 0x95, 0x9d, 0x3b, 0x97, 0xbb, 0x61,
- 0x44, 0x1a, 0x14, 0x33, 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0xa1, 0x8f, 0x79,
- 0x3d, 0x86, 0x53, 0x03, 0xf9, 0x6a, 0x0d, 0x33, 0x12, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b,
- 0x0a, 0xe8, 0xaa, 0xf4, 0x29, 0x0e, 0x97, 0xbd, 0xf5, 0x90, 0x30, 0x9f, 0xe2, 0xc1, 0xb9, 0xc7,
- 0x63, 0x77, 0x61, 0x5e, 0x9e, 0x99, 0x7f, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xb5,
- 0x15, 0x71, 0xdb, 0x32, 0xbb, 0x67, 0xa6, 0xbe, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69,
- 0x52, 0x16, 0x9a, 0xef, 0x7a, 0xe1, 0x83, 0xd5, 0xe9, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1,
- 0x12, 0x2c, 0xea, 0x23, 0x17, 0xc6, 0x02, 0x83, 0xbd, 0x90, 0xe1, 0xd6, 0xce, 0x77, 0xc7, 0xc4,
- 0x68, 0x81, 0xfc, 0x4d, 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x23, 0xed, 0x5f, 0xfe,
- 0x9d, 0x58, 0xa3, 0xe9, 0x6d, 0x18, 0x31, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x3c, 0x79,
- 0xb2, 0x3c, 0x54, 0xcd, 0xe3, 0xdf, 0xf6, 0xc0, 0xa8, 0xb9, 0x13, 0xd0, 0x45, 0x18, 0x14, 0x44,
- 0x54, 0x46, 0x2b, 0xb5, 0xb9, 0x57, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x91, 0x19, 0xab, 0xae, 0xf9,
- 0x0a, 0xc4, 0x89, 0xcc, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x01, 0x7b, 0xdb, 0xf7, 0x23, 0x75, 0x8f,
- 0xaa, 0xed, 0x32, 0xc7, 0x4a, 0xb1, 0x80, 0xd2, 0xfb, 0x73, 0x9b, 0x04, 0x1e, 0xa9, 0x9b, 0x29,
- 0x1d, 0xd4, 0xfd, 0x79, 0x4d, 0x07, 0x62, 0x13, 0x97, 0x72, 0x01, 0x7e, 0xc8, 0xf6, 0x9f, 0x78,
- 0x26, 0xc7, 0xbe, 0x17, 0x15, 0x1e, 0x45, 0x42, 0xc2, 0xd1, 0x27, 0xe0, 0xa4, 0x0a, 0x9f, 0x28,
- 0x56, 0x97, 0x6c, 0xb1, 0xcf, 0x90, 0x6a, 0x9d, 0x9c, 0xcf, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xeb,
- 0x30, 0x2a, 0x9e, 0x52, 0x92, 0x62, 0xbf, 0x69, 0x48, 0x78, 0xcd, 0x80, 0xe2, 0x04, 0xb6, 0x4c,
- 0x4a, 0xc1, 0xde, 0x18, 0x92, 0xc2, 0x40, 0x3a, 0x29, 0x85, 0x0e, 0xc7, 0xa9, 0x1a, 0x68, 0x16,
- 0xc6, 0x38, 0xeb, 0xe8, 0x7a, 0x9b, 0x7c, 0x4e, 0x84, 0x67, 0xa7, 0xda, 0x54, 0x37, 0x4c, 0x30,
- 0x4e, 0xe2, 0xa3, 0xcb, 0x30, 0xec, 0x04, 0xd5, 0x2d, 0x37, 0x22, 0x55, 0xba, 0x33, 0x98, 0x2d,
- 0x9f, 0x66, 0x89, 0x39, 0xab, 0xc1, 0xb0, 0x81, 0x69, 0xdf, 0x83, 0xc9, 0x8c, 0xf0, 0x32, 0x74,
- 0xe1, 0x38, 0x4d, 0x57, 0x7e, 0x53, 0xc2, 0xdd, 0x61, 0xb6, 0xbc, 0x2c, 0xbf, 0x46, 0xc3, 0xa2,
- 0xab, 0x93, 0x85, 0xa1, 0xd1, 0x92, 0x6f, 0xab, 0xd5, 0xb9, 0x24, 0x01, 0x38, 0xc6, 0xb1, 0xff,
- 0xb9, 0x00, 0x63, 0x19, 0x0a, 0x3a, 0x96, 0x00, 0x3a, 0xf1, 0xd2, 0x8a, 0xf3, 0x3d, 0x9b, 0x39,
- 0x4e, 0x0a, 0x87, 0xc8, 0x71, 0x52, 0xec, 0x94, 0xe3, 0xa4, 0xe7, 0xbd, 0xe4, 0x38, 0x31, 0x47,
- 0xac, 0xb7, 0xab, 0x11, 0xcb, 0xc8, 0x8b, 0xd2, 0x77, 0xc8, 0xbc, 0x28, 0xc6, 0xa0, 0xf7, 0x77,
- 0x31, 0xe8, 0x3f, 0x5d, 0x80, 0xf1, 0xa4, 0x6e, 0xef, 0x08, 0xe4, 0xe3, 0x6f, 0x1a, 0xf2, 0xf1,
- 0x0b, 0xdd, 0x78, 0xe2, 0xe7, 0xca, 0xca, 0x71, 0x42, 0x56, 0xfe, 0x74, 0x57, 0xd4, 0xda, 0xcb,
- 0xcd, 0x7f, 0xb1, 0x00, 0xc7, 0x33, 0x55, 0x9e, 0x47, 0x30, 0x36, 0x37, 0x8c, 0xb1, 0x79, 0xae,
- 0xeb, 0x28, 0x05, 0xb9, 0x03, 0x74, 0x2b, 0x31, 0x40, 0x17, 0xbb, 0x27, 0xd9, 0x7e, 0x94, 0xbe,
- 0x55, 0x84, 0xb3, 0x99, 0xf5, 0x62, 0xf1, 0xf2, 0x92, 0x21, 0x5e, 0x7e, 0x3e, 0x21, 0x5e, 0xb6,
- 0xdb, 0xd7, 0x7e, 0x30, 0xf2, 0x66, 0xe1, 0xad, 0xcf, 0x62, 0x8e, 0xdc, 0xa7, 0xac, 0xd9, 0xf0,
- 0xd6, 0x57, 0x84, 0xb0, 0x49, 0xf7, 0x7b, 0x49, 0xc6, 0xfc, 0x67, 0x16, 0x9c, 0xca, 0x9c, 0x9b,
- 0x23, 0x90, 0xf4, 0xad, 0x9a, 0x92, 0xbe, 0xa7, 0xba, 0x5e, 0xad, 0x39, 0xa2, 0xbf, 0x2f, 0xf6,
- 0xe5, 0x7c, 0x0b, 0x13, 0x40, 0xdc, 0x80, 0x21, 0xa7, 0x5a, 0x25, 0x61, 0xb8, 0xe2, 0xd7, 0x54,
- 0x3a, 0x84, 0xe7, 0xd8, 0xf3, 0x30, 0x2e, 0x3e, 0xd8, 0x2b, 0x4d, 0x27, 0x49, 0xc4, 0x60, 0xac,
- 0x53, 0x40, 0x9f, 0x82, 0x81, 0x50, 0x66, 0xb2, 0xec, 0xb9, 0xff, 0x4c, 0x96, 0x8c, 0xc9, 0x55,
- 0x02, 0x16, 0x45, 0x12, 0x7d, 0xbf, 0x1e, 0xfd, 0xa9, 0x8d, 0x68, 0x91, 0x77, 0xf2, 0x3e, 0x62,
- 0x40, 0x3d, 0x0f, 0xb0, 0xa3, 0x5e, 0x32, 0x49, 0xe1, 0x89, 0xf6, 0xc6, 0xd1, 0xb0, 0xd0, 0x1b,
- 0x30, 0x1e, 0xf2, 0xc0, 0xa7, 0xb1, 0x91, 0x0a, 0x5f, 0x8b, 0x2c, 0x76, 0x5c, 0x25, 0x01, 0xc3,
- 0x29, 0x6c, 0xb4, 0x24, 0x5b, 0x65, 0xe6, 0x48, 0x7c, 0x79, 0x9e, 0x8f, 0x5b, 0x14, 0x26, 0x49,
- 0xc7, 0x92, 0x93, 0xc0, 0x86, 0x5f, 0xab, 0x89, 0x3e, 0x05, 0x40, 0x17, 0x91, 0x10, 0xa2, 0xf4,
- 0xe7, 0x1f, 0xa1, 0xf4, 0x6c, 0xa9, 0x65, 0x7a, 0x32, 0x30, 0x37, 0xfb, 0x05, 0x45, 0x04, 0x6b,
- 0x04, 0x91, 0x03, 0x23, 0xf1, 0xbf, 0x38, 0x47, 0xfb, 0x85, 0xdc, 0x16, 0x92, 0xc4, 0x99, 0x82,
- 0x61, 0x41, 0x27, 0x81, 0x4d, 0x8a, 0xe8, 0x93, 0x70, 0x6a, 0x27, 0xd7, 0xf2, 0x87, 0x73, 0x82,
- 0x2c, 0xe9, 0x7a, 0xbe, 0xbd, 0x4f, 0x7e, 0x7d, 0xfb, 0x7f, 0x07, 0x78, 0xa4, 0xcd, 0x49, 0x8f,
- 0x66, 0x4d, 0xad, 0xfd, 0x33, 0x49, 0xc9, 0xc6, 0x74, 0x66, 0x65, 0x43, 0xd4, 0x91, 0xd8, 0x50,
- 0x85, 0xf7, 0xbc, 0xa1, 0x7e, 0xc2, 0xd2, 0x64, 0x4e, 0xdc, 0xa6, 0xfb, 0x63, 0x87, 0xbc, 0xc1,
- 0x1e, 0xa0, 0x10, 0x6a, 0x23, 0x43, 0x92, 0xf3, 0x7c, 0xd7, 0xdd, 0xe9, 0x5e, 0xb4, 0xf3, 0xf5,
- 0xec, 0x80, 0xef, 0x5c, 0xc8, 0x73, 0xe5, 0xb0, 0xdf, 0x7f, 0x54, 0xc1, 0xdf, 0xbf, 0x69, 0xc1,
- 0xa9, 0x54, 0x31, 0xef, 0x03, 0x09, 0x45, 0xb4, 0xbb, 0xd5, 0xf7, 0xdc, 0x79, 0x49, 0x90, 0x7f,
- 0xc3, 0x55, 0xf1, 0x0d, 0xa7, 0x72, 0xf1, 0x92, 0x5d, 0xff, 0xd2, 0xdf, 0x94, 0x26, 0x59, 0x03,
- 0x26, 0x22, 0xce, 0xef, 0x3a, 0x6a, 0xc2, 0xb9, 0x6a, 0x2b, 0x08, 0xe2, 0xc5, 0x9a, 0xb1, 0x39,
- 0xf9, 0x5b, 0xef, 0xf1, 0xfd, 0xbd, 0xd2, 0xb9, 0xf9, 0x0e, 0xb8, 0xb8, 0x23, 0x35, 0xe4, 0x01,
- 0x6a, 0xa4, 0xec, 0xeb, 0xd8, 0x01, 0x90, 0x23, 0x87, 0x49, 0x5b, 0xe3, 0x71, 0x4b, 0xd9, 0x0c,
- 0x2b, 0xbd, 0x0c, 0xca, 0x47, 0x2b, 0x3d, 0xf9, 0xce, 0xc4, 0xa5, 0x9f, 0xbe, 0x0e, 0x67, 0xdb,
- 0x2f, 0xa6, 0x43, 0x85, 0x72, 0xf8, 0x4b, 0x0b, 0xce, 0xb4, 0x8d, 0x17, 0xf6, 0x5d, 0xf8, 0x58,
- 0xb0, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x35, 0x92, 0x4e, 0x78, 0x55, 0x5a, 0xa8, 0x99, 0xa3, 0xc6,
- 0x91, 0x73, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xc5, 0x66, 0xa1, 0xa3, 0xc5, 0xe6, 0x1f, 0x59, 0x90,
- 0xba, 0xea, 0x8f, 0x80, 0xf3, 0x5c, 0x36, 0x39, 0xcf, 0xc7, 0xbb, 0x19, 0xcd, 0x1c, 0xa6, 0xf3,
- 0x1f, 0xc7, 0xe0, 0x44, 0x8e, 0x27, 0xf6, 0x0e, 0x4c, 0x6c, 0x56, 0x89, 0x19, 0x7a, 0xa3, 0x5d,
- 0x48, 0xba, 0xb6, 0x71, 0x3a, 0xe6, 0x8e, 0xef, 0xef, 0x95, 0x26, 0x52, 0x28, 0x38, 0xdd, 0x04,
- 0xfa, 0xbc, 0x05, 0xc7, 0x9c, 0x3b, 0xe1, 0x22, 0x7d, 0x41, 0xb8, 0xd5, 0xb9, 0xba, 0x5f, 0xdd,
- 0xa6, 0x8c, 0x99, 0xdc, 0x56, 0x2f, 0x66, 0x0a, 0xa3, 0x6f, 0x55, 0x52, 0xf8, 0x46, 0xf3, 0x53,
- 0xfb, 0x7b, 0xa5, 0x63, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84, 0x45, 0xc6, 0x2f, 0x27, 0xda, 0x6a,
- 0x17, 0x1c, 0x26, 0xcb, 0x65, 0x9e, 0xb3, 0xc4, 0x12, 0x82, 0x15, 0x1d, 0xf4, 0x19, 0x18, 0xdc,
- 0x94, 0x71, 0x20, 0x32, 0x58, 0xee, 0x78, 0x20, 0xdb, 0x47, 0xc7, 0xe0, 0x26, 0x30, 0x0a, 0x09,
- 0xc7, 0x44, 0xd1, 0xeb, 0x50, 0xf4, 0x36, 0x42, 0x11, 0xa2, 0x2e, 0xdb, 0x12, 0xd7, 0xb4, 0x75,
- 0xe6, 0x21, 0x98, 0x56, 0x97, 0x2a, 0x98, 0x56, 0x44, 0x57, 0xa1, 0x18, 0xdc, 0xae, 0x09, 0x4d,
- 0x4a, 0xe6, 0x26, 0xc5, 0x73, 0x0b, 0x39, 0xbd, 0x62, 0x94, 0xf0, 0xdc, 0x02, 0xa6, 0x24, 0x50,
- 0x19, 0x7a, 0x99, 0xfb, 0xb2, 0x60, 0x6d, 0x33, 0x9f, 0xf2, 0x6d, 0xc2, 0x00, 0x70, 0x8f, 0x44,
- 0x86, 0x80, 0x39, 0x21, 0xb4, 0x06, 0x7d, 0x55, 0xd7, 0xab, 0x91, 0x40, 0xf0, 0xb2, 0x1f, 0xce,
- 0xd4, 0x99, 0x30, 0x8c, 0x1c, 0x9a, 0x5c, 0x85, 0xc0, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xe6,
- 0xd6, 0x86, 0xbc, 0xb1, 0xb2, 0xa9, 0x92, 0xe6, 0xd6, 0x52, 0xa5, 0x2d, 0x55, 0x86, 0x81, 0x05,
- 0x2d, 0xf4, 0x0a, 0x14, 0x36, 0xaa, 0xc2, 0x35, 0x39, 0x53, 0x79, 0x62, 0x46, 0xd1, 0x9a, 0xeb,
- 0xdb, 0xdf, 0x2b, 0x15, 0x96, 0xe6, 0x71, 0x61, 0xa3, 0x8a, 0x56, 0xa1, 0x7f, 0x83, 0xc7, 0xdd,
- 0x11, 0xfa, 0x91, 0x27, 0xb3, 0x43, 0x02, 0xa5, 0x42, 0xf3, 0x70, 0xef, 0x52, 0x01, 0xc0, 0x92,
- 0x08, 0x4b, 0x40, 0xa5, 0xe2, 0x07, 0x89, 0xf0, 0xa5, 0x33, 0x87, 0x8b, 0xf9, 0xc4, 0x9f, 0x1a,
- 0x71, 0x14, 0x22, 0xac, 0x51, 0xa4, 0xab, 0xda, 0xb9, 0xd7, 0x0a, 0x58, 0x6e, 0x0b, 0xa1, 0x1a,
- 0xc9, 0x5c, 0xd5, 0xb3, 0x12, 0xa9, 0xdd, 0xaa, 0x56, 0x48, 0x38, 0x26, 0x8a, 0xb6, 0x61, 0x64,
- 0x27, 0x6c, 0x6e, 0x11, 0xb9, 0xa5, 0x59, 0xd8, 0xbb, 0x1c, 0x6e, 0xf6, 0xa6, 0x40, 0x74, 0x83,
- 0xa8, 0xe5, 0xd4, 0x53, 0xa7, 0x10, 0x7b, 0xd6, 0xdc, 0xd4, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe,
- 0x77, 0x5b, 0xfe, 0xed, 0xdd, 0x88, 0x88, 0xa8, 0xa3, 0x99, 0xc3, 0xff, 0x16, 0x47, 0x49, 0x0f,
- 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x14, 0xc3, 0xc3, 0x4e, 0xcf, 0xf1, 0xfc, 0x90, 0xe6, 0xb3,
- 0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x73, 0xcb, 0x8f, 0x7c, 0x2f,
- 0x71, 0x42, 0x4f, 0xe4, 0x9f, 0x92, 0xe5, 0x0c, 0xfc, 0xf4, 0x29, 0x99, 0x85, 0x85, 0x33, 0xdb,
- 0x42, 0x35, 0x18, 0x6d, 0xfa, 0x41, 0x74, 0xc7, 0x0f, 0xe4, 0xfa, 0x42, 0x6d, 0x04, 0xa5, 0x06,
- 0xa6, 0x68, 0x91, 0x19, 0xe6, 0x98, 0x10, 0x9c, 0xa0, 0x89, 0x3e, 0x0e, 0xfd, 0x61, 0xd5, 0xa9,
- 0x93, 0xe5, 0x1b, 0x53, 0x93, 0xf9, 0xd7, 0x4f, 0x85, 0xa3, 0xe4, 0xac, 0x2e, 0x1e, 0x36, 0x89,
- 0xa3, 0x60, 0x49, 0x0e, 0x2d, 0x41, 0x2f, 0x4b, 0xec, 0xcc, 0x42, 0xe4, 0xe6, 0x44, 0x66, 0x4f,
- 0xb9, 0xd5, 0xf0, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0x29, 0xf0, 0xc3, 0xa9,
- 0xe3, 0xf9, 0x7b, 0x40, 0x08, 0x18, 0x6e, 0x54, 0xda, 0xed, 0x01, 0x85, 0x84, 0x63, 0xa2, 0xf4,
- 0x64, 0xa6, 0xa7, 0xe9, 0x89, 0x36, 0x26, 0x93, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a,
- 0x49, 0xd8, 0x7f, 0x30, 0x90, 0xe6, 0x59, 0x98, 0x84, 0xe9, 0x3f, 0xb7, 0x52, 0x36, 0x13, 0x1f,
- 0xe9, 0x56, 0xe0, 0xfd, 0x00, 0x1f, 0xae, 0x9f, 0xb7, 0xe0, 0x44, 0x33, 0xf3, 0x43, 0x04, 0x03,
- 0xd0, 0x9d, 0xdc, 0x9c, 0x7f, 0xba, 0x0a, 0xa7, 0x9c, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x85, 0x03,
- 0xc5, 0xf7, 0x2c, 0x1c, 0x58, 0x81, 0x81, 0x2a, 0x7f, 0xc9, 0xc9, 0x34, 0x00, 0x5d, 0x05, 0x03,
- 0x65, 0xac, 0x84, 0x78, 0x02, 0x6e, 0x60, 0x45, 0x02, 0xfd, 0xa4, 0x05, 0x67, 0x92, 0x5d, 0xc7,
- 0x84, 0x81, 0x85, 0xc1, 0x24, 0x17, 0x6b, 0x2d, 0x89, 0xef, 0x4f, 0xf1, 0xff, 0x06, 0xf2, 0x41,
- 0x27, 0x04, 0xdc, 0xbe, 0x31, 0xb4, 0x90, 0x21, 0x57, 0xeb, 0x33, 0x35, 0x8a, 0x5d, 0xc8, 0xd6,
- 0x5e, 0x84, 0xe1, 0x86, 0xdf, 0xf2, 0x22, 0x61, 0xf7, 0x28, 0x8c, 0xa7, 0x98, 0xd1, 0xd0, 0x8a,
- 0x56, 0x8e, 0x0d, 0xac, 0x84, 0x44, 0x6e, 0xe0, 0xbe, 0x25, 0x72, 0xef, 0xc0, 0xb0, 0xa7, 0xb9,
- 0x04, 0xb4, 0x7b, 0xc1, 0x0a, 0xe9, 0xa2, 0x86, 0xcd, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0x5a, 0x7b,
- 0x69, 0x19, 0xbc, 0x37, 0x69, 0xd9, 0x91, 0x3e, 0x89, 0xed, 0x5f, 0x2f, 0x64, 0xbc, 0x18, 0xb8,
- 0x54, 0xee, 0x35, 0x53, 0x2a, 0x77, 0x3e, 0x29, 0x95, 0x4b, 0xa9, 0xaa, 0x0c, 0x81, 0x5c, 0xf7,
- 0x19, 0x25, 0xbb, 0x0e, 0xf0, 0xfc, 0xc3, 0x16, 0x9c, 0x64, 0xba, 0x0f, 0xda, 0xc0, 0x7b, 0xd6,
- 0x77, 0x30, 0x93, 0xd4, 0xeb, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x1d, 0xce, 0x75, 0xba, 0x77,
- 0x99, 0x85, 0x6f, 0x4d, 0x19, 0x47, 0xc4, 0x16, 0xbe, 0xb5, 0xe5, 0x05, 0xcc, 0x20, 0xdd, 0x86,
- 0x2f, 0xb4, 0xff, 0x7f, 0x0b, 0x8a, 0x65, 0xbf, 0x76, 0x04, 0x2f, 0xfa, 0x8f, 0x19, 0x2f, 0xfa,
- 0x47, 0xb2, 0x6f, 0xfc, 0x5a, 0xae, 0xb2, 0x6f, 0x31, 0xa1, 0xec, 0x3b, 0x93, 0x47, 0xa0, 0xbd,
- 0x6a, 0xef, 0x97, 0x8a, 0x30, 0x54, 0xf6, 0x6b, 0x6a, 0x9f, 0xfd, 0xaf, 0xf7, 0xe3, 0xc8, 0x93,
- 0x9b, 0x7d, 0x4a, 0xa3, 0xcc, 0x2c, 0x7a, 0x65, 0xdc, 0x89, 0xef, 0x32, 0x7f, 0x9e, 0x5b, 0xc4,
- 0xdd, 0xdc, 0x8a, 0x48, 0x2d, 0xf9, 0x39, 0x47, 0xe7, 0xcf, 0xf3, 0xed, 0x22, 0x8c, 0x25, 0x5a,
- 0x47, 0x75, 0x18, 0xa9, 0xeb, 0xaa, 0x24, 0xb1, 0x4e, 0xef, 0x4b, 0x0b, 0x25, 0xfc, 0x21, 0xb4,
- 0x22, 0x6c, 0x12, 0x47, 0x33, 0x00, 0x9e, 0x6e, 0x15, 0xae, 0x02, 0x15, 0x6b, 0x16, 0xe1, 0x1a,
- 0x06, 0x7a, 0x09, 0x86, 0x22, 0xbf, 0xe9, 0xd7, 0xfd, 0xcd, 0xdd, 0x6b, 0x44, 0x46, 0xb6, 0x54,
- 0x46, 0xc3, 0x6b, 0x31, 0x08, 0xeb, 0x78, 0xe8, 0x2e, 0x4c, 0x28, 0x22, 0x95, 0x07, 0xa0, 0x5e,
- 0x63, 0x62, 0x93, 0xd5, 0x24, 0x45, 0x9c, 0x6e, 0x04, 0xbd, 0x02, 0xa3, 0xcc, 0x7a, 0x99, 0xd5,
- 0xbf, 0x46, 0x76, 0x65, 0xc4, 0x63, 0xc6, 0x61, 0xaf, 0x18, 0x10, 0x9c, 0xc0, 0x44, 0xf3, 0x30,
- 0xd1, 0x70, 0xc3, 0x44, 0xf5, 0x3e, 0x56, 0x9d, 0x75, 0x60, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0xfb,
- 0x57, 0xc5, 0x1c, 0x7b, 0x91, 0xfb, 0xc1, 0x76, 0x7c, 0x7f, 0x6f, 0xc7, 0x6f, 0x59, 0x30, 0x4e,
- 0x5b, 0x67, 0x26, 0x99, 0x92, 0x91, 0x52, 0x39, 0x31, 0xac, 0x36, 0x39, 0x31, 0xce, 0xd3, 0x63,
- 0xbb, 0xe6, 0xb7, 0x22, 0x21, 0x1d, 0xd5, 0xce, 0x65, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20,
- 0x10, 0x7e, 0xef, 0x3a, 0x1e, 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x65, 0x46, 0x4f, 0x76, 0xca, 0x0c,
- 0x1e, 0xf9, 0x5c, 0x58, 0xc1, 0x09, 0x96, 0x56, 0x8b, 0x7c, 0x2e, 0xcd, 0xe3, 0x62, 0x1c, 0xfb,
- 0xeb, 0x45, 0x18, 0x2e, 0xfb, 0xb5, 0xd8, 0xb0, 0xe3, 0x45, 0xc3, 0xb0, 0xe3, 0x5c, 0xc2, 0xb0,
- 0x63, 0x5c, 0xc7, 0xfd, 0xc0, 0x8c, 0xe3, 0x3b, 0x65, 0xc6, 0xf1, 0x87, 0x16, 0x9b, 0xb5, 0x85,
- 0xd5, 0x0a, 0xb7, 0xf0, 0x45, 0x97, 0x60, 0x88, 0x9d, 0x70, 0x2c, 0xd0, 0x82, 0xb4, 0x76, 0x60,
- 0x29, 0x2c, 0x57, 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x0b, 0x30, 0x10, 0x12, 0x27, 0xa8, 0x6e, 0xa9,
- 0xe3, 0x5d, 0x98, 0x26, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0xe8, 0x76, 0x31, 0xdf, 0x5c,
- 0x58, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x91, 0xb6, 0xed, 0x5b, 0x80, 0xd2, 0xf8, 0x5d, 0xf8, 0x5f,
- 0x95, 0xcc, 0xb0, 0xb0, 0x83, 0xa9, 0x90, 0xb0, 0xff, 0x62, 0xc1, 0x68, 0xd9, 0xaf, 0xd1, 0xad,
- 0xfb, 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x06, 0xbd, 0x65, 0xbf,
- 0xd6, 0x21, 0x74, 0xed, 0x7f, 0x63, 0x41, 0x7f, 0xd9, 0xaf, 0x1d, 0x81, 0xe2, 0xe5, 0x35, 0x53,
- 0xf1, 0x72, 0x32, 0x67, 0xdd, 0xe4, 0xe8, 0x5a, 0xfe, 0xa4, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x4d,
- 0x39, 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x27, 0x39,
- 0xad, 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x16, 0x06, 0x9a, 0x01, 0xd9, 0x71, 0x7d, 0xc1, 0x5f,
- 0x6b, 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5,
- 0xf7, 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x5b, 0x30, 0xc8,
- 0xfe, 0xb3, 0x63, 0xa7, 0xf7, 0xd0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a,
- 0x1e, 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b,
- 0x58, 0xe8, 0x19, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xdd, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4,
- 0xeb, 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x8d, 0x44, 0x6a, 0xba,
- 0x22, 0xe7, 0x05, 0xaf, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x2d, 0x38, 0xed, 0x7a, 0x2c, 0x85, 0x14,
- 0xa9, 0x6c, 0xbb, 0xcd, 0xb5, 0xeb, 0x95, 0x9b, 0x24, 0x70, 0x37, 0x76, 0xe7, 0x9c, 0xea, 0x36,
- 0xf1, 0x64, 0x42, 0xfc, 0xc7, 0x45, 0x17, 0x4f, 0x2f, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0x21, 0x9b,
- 0x6e, 0xc7, 0x80, 0x38, 0x0d, 0x21, 0x13, 0xe0, 0xe9, 0x67, 0x58, 0x09, 0x16, 0x10, 0xfb, 0x05,
- 0xb6, 0x27, 0x6e, 0x54, 0xd0, 0xd3, 0xc6, 0xf1, 0x72, 0x42, 0x3f, 0x5e, 0x0e, 0xf6, 0x4a, 0x7d,
- 0x37, 0x2a, 0x5a, 0x7c, 0xa0, 0xcb, 0x70, 0xbc, 0xec, 0xd7, 0xca, 0x7e, 0x10, 0x2d, 0xf9, 0xc1,
- 0x1d, 0x27, 0xa8, 0xc9, 0x25, 0x58, 0x92, 0x11, 0x92, 0xe8, 0x19, 0xdb, 0xcb, 0x4f, 0x20, 0x23,
- 0xfa, 0xd1, 0x0b, 0x8c, 0xab, 0x3b, 0xa4, 0x43, 0x6a, 0x95, 0xf1, 0x17, 0x2a, 0x51, 0xdb, 0x15,
- 0x27, 0x22, 0xe8, 0x06, 0x8c, 0x54, 0xf5, 0xab, 0x56, 0x54, 0x7f, 0x4a, 0x5e, 0x76, 0xc6, 0x3d,
- 0x9c, 0x79, 0x37, 0x9b, 0xf5, 0xed, 0x6f, 0x5a, 0xa2, 0x15, 0x2e, 0xad, 0xe0, 0x76, 0xaf, 0x9d,
- 0xcf, 0xdc, 0x79, 0x98, 0x08, 0xf4, 0x2a, 0x9a, 0xfd, 0xd8, 0x71, 0x9e, 0xf9, 0x26, 0x01, 0xc4,
- 0x69, 0x7c, 0xf4, 0x49, 0x38, 0x65, 0x14, 0x4a, 0x55, 0xba, 0x96, 0x7f, 0x9a, 0xc9, 0x73, 0x70,
- 0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x20, 0x9c, 0x48, 0x7e, 0x97, 0x90, 0xb0, 0xdc, 0xe7, 0xd7,
- 0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x4b, 0x30, 0x41, 0x9f, 0xde, 0x8a, 0x8d, 0x64, 0xf3, 0xd7, 0x39,
- 0x08, 0xd5, 0x6f, 0x0e, 0xb0, 0x6b, 0x30, 0x91, 0x7d, 0x0d, 0x7d, 0x1a, 0x46, 0x43, 0xc2, 0x22,
- 0xaf, 0x49, 0xc9, 0x5e, 0x1b, 0x6f, 0xf2, 0xca, 0xa2, 0x8e, 0xc9, 0x5f, 0x2f, 0x66, 0x19, 0x4e,
- 0x50, 0x43, 0x0d, 0x18, 0xbd, 0xe3, 0x7a, 0x35, 0xff, 0x4e, 0x28, 0xe9, 0x0f, 0xe4, 0xab, 0x09,
- 0x6e, 0x71, 0xcc, 0x44, 0x1f, 0x8d, 0xe6, 0x6e, 0x19, 0xc4, 0x70, 0x82, 0x38, 0x3d, 0x6a, 0x82,
- 0x96, 0x37, 0x1b, 0xae, 0x87, 0x24, 0x10, 0x71, 0xe1, 0xd8, 0x51, 0x83, 0x65, 0x21, 0x8e, 0xe1,
- 0xf4, 0xa8, 0x61, 0x7f, 0x98, 0x3b, 0x3a, 0x3b, 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30,
- 0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xfa, 0x1e, 0xf6, 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0x55, 0xa5, 0x56,
- 0x8e, 0x0d, 0xac, 0x9c, 0x28, 0x74, 0x3d, 0x87, 0x8d, 0x42, 0x87, 0xa2, 0x36, 0x1e, 0xf8, 0x3c,
- 0x1a, 0xf2, 0xe5, 0x76, 0x1e, 0xf8, 0x07, 0xf7, 0xe5, 0x9d, 0x4f, 0x79, 0x81, 0x0d, 0x31, 0x40,
- 0xbd, 0x3c, 0xcc, 0x1e, 0x53, 0x64, 0x56, 0xf8, 0xe8, 0x48, 0x18, 0x5a, 0x84, 0xfe, 0x70, 0x37,
- 0xac, 0x46, 0xf5, 0xb0, 0x5d, 0x3a, 0xd2, 0x0a, 0x43, 0xd1, 0xb2, 0x61, 0xf3, 0x2a, 0x58, 0xd6,
- 0x45, 0x55, 0x98, 0x14, 0x14, 0xe7, 0xb7, 0x1c, 0x4f, 0x25, 0x49, 0xe4, 0x16, 0x8b, 0x97, 0xf6,
- 0xf7, 0x4a, 0x93, 0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x57, 0xa2, 0x5b, 0x32, 0x03, 0x82, 0xb3, 0xa8,
- 0xf1, 0x25, 0x5f, 0xad, 0xfa, 0x8d, 0x66, 0x39, 0xf0, 0x37, 0xdc, 0x3a, 0x69, 0xa7, 0x0c, 0xae,
- 0x18, 0x98, 0x62, 0xc9, 0x1b, 0x65, 0x38, 0x41, 0x0d, 0xdd, 0x86, 0x31, 0xa7, 0xd9, 0x9c, 0x0d,
- 0x1a, 0x7e, 0x20, 0x1b, 0x18, 0xca, 0xd7, 0x2a, 0xcc, 0x9a, 0xa8, 0x3c, 0x47, 0x62, 0xa2, 0x10,
- 0x27, 0x09, 0xd2, 0x81, 0x12, 0x1b, 0xcd, 0x18, 0xa8, 0x91, 0x78, 0xa0, 0xc4, 0xbe, 0xcc, 0x18,
- 0xa8, 0x0c, 0x08, 0xce, 0xa2, 0x66, 0xff, 0x00, 0x63, 0xfc, 0x2b, 0xee, 0xa6, 0xc7, 0x9c, 0xe3,
- 0x50, 0x03, 0x46, 0x9a, 0xec, 0xd8, 0x17, 0xf9, 0xcb, 0xc4, 0x51, 0xf1, 0x62, 0x97, 0xc2, 0xcb,
- 0x3b, 0x2c, 0x03, 0xab, 0x61, 0xc4, 0x5a, 0xd6, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x8b, 0xd3, 0x8c,
- 0x75, 0xac, 0x70, 0x89, 0x64, 0xbf, 0x70, 0x55, 0x14, 0x32, 0x88, 0xe9, 0x7c, 0xd9, 0x7f, 0xbc,
- 0xbe, 0x84, 0xbb, 0x23, 0x96, 0x75, 0xd1, 0xa7, 0x60, 0x94, 0x3e, 0xe9, 0x15, 0xfb, 0x16, 0x4e,
- 0x1d, 0xcb, 0x8f, 0x81, 0xa5, 0xb0, 0xf4, 0xdc, 0x86, 0x7a, 0x65, 0x9c, 0x20, 0x86, 0xde, 0x62,
- 0x76, 0x9d, 0x92, 0x74, 0xa1, 0x1b, 0xd2, 0xba, 0x09, 0xa7, 0x24, 0xab, 0x11, 0x41, 0x2d, 0x98,
- 0x4c, 0x67, 0x70, 0x0e, 0xa7, 0xec, 0xfc, 0xb7, 0x51, 0x3a, 0x09, 0x73, 0x9c, 0x84, 0x2e, 0x0d,
- 0x0b, 0x71, 0x16, 0x7d, 0x74, 0x3d, 0x99, 0x5f, 0xb7, 0x68, 0x68, 0x0d, 0x52, 0x39, 0x76, 0x47,
- 0xda, 0xa6, 0xd6, 0xdd, 0x84, 0x33, 0x5a, 0x8a, 0xd2, 0x2b, 0x81, 0xc3, 0xec, 0x8a, 0x5c, 0x76,
- 0x1b, 0x69, 0x4c, 0xed, 0xa3, 0xfb, 0x7b, 0xa5, 0x33, 0x6b, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xe8,
- 0x06, 0x1c, 0xe7, 0x11, 0x5c, 0x16, 0x88, 0x53, 0xab, 0xbb, 0x9e, 0xe2, 0x9a, 0xf9, 0xd9, 0x75,
- 0x6a, 0x7f, 0xaf, 0x74, 0x7c, 0x36, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xaf, 0xc1, 0x60, 0xcd, 0x93,
- 0xa7, 0x6c, 0x9f, 0x91, 0x05, 0x76, 0x70, 0x61, 0xb5, 0xa2, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a,
- 0x68, 0x93, 0xab, 0xad, 0x94, 0xac, 0xb1, 0x3f, 0x15, 0xd8, 0x33, 0x29, 0x8e, 0x37, 0x42, 0x22,
- 0x70, 0x7d, 0xad, 0x72, 0xb9, 0x33, 0xa2, 0x25, 0x18, 0x84, 0xd1, 0x9b, 0x80, 0x44, 0xb6, 0xa1,
- 0xd9, 0x2a, 0x4b, 0x8e, 0xa7, 0xd9, 0x92, 0x2a, 0x11, 0x42, 0x25, 0x85, 0x81, 0x33, 0x6a, 0xa1,
- 0xab, 0xf4, 0x78, 0xd4, 0x4b, 0xc5, 0xf1, 0xab, 0x72, 0x8d, 0x2f, 0x90, 0x66, 0x40, 0x98, 0xf9,
- 0xa3, 0x49, 0x11, 0x27, 0xea, 0xa1, 0x1a, 0x9c, 0x76, 0x5a, 0x91, 0xcf, 0x34, 0x82, 0x26, 0xea,
- 0x9a, 0xbf, 0x4d, 0x3c, 0xa6, 0x8c, 0x1f, 0x60, 0x01, 0x43, 0x4f, 0xcf, 0xb6, 0xc1, 0xc3, 0x6d,
- 0xa9, 0xd0, 0xe7, 0x14, 0x1d, 0x0b, 0x4d, 0x59, 0x67, 0x78, 0x77, 0x73, 0x0d, 0xb6, 0xc4, 0x40,
- 0x2f, 0xc1, 0xd0, 0x96, 0x1f, 0x46, 0xab, 0x24, 0xba, 0xe3, 0x07, 0xdb, 0x22, 0xbd, 0x41, 0x9c,
- 0x52, 0x26, 0x06, 0x61, 0x1d, 0x0f, 0x3d, 0x05, 0xfd, 0xcc, 0x54, 0x6c, 0x79, 0x81, 0xdd, 0xb5,
- 0x03, 0xf1, 0x19, 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xcb, 0xe5, 0x79, 0x76, 0x1c, 0x27,
- 0x50, 0x97, 0xcb, 0xf3, 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9c, 0x80, 0x94, 0x03, 0xbf, 0x4a,
- 0x42, 0x2d, 0x91, 0xd1, 0x23, 0x3c, 0x79, 0x03, 0x5d, 0xae, 0x95, 0x2c, 0x04, 0x9c, 0x5d, 0x0f,
- 0x91, 0x74, 0x7a, 0xde, 0xd1, 0x7c, 0x55, 0x69, 0x9a, 0x1d, 0xec, 0x32, 0x43, 0xaf, 0x07, 0xe3,
- 0x2a, 0x31, 0x30, 0x4f, 0xd7, 0x10, 0x4e, 0x8d, 0xb1, 0xb5, 0xdd, 0x7d, 0xae, 0x07, 0xa5, 0x7c,
- 0x5e, 0x4e, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xda, 0xf1, 0x8e, 0x11, 0x69, 0x2f, 0xc2, 0x60,
- 0xd8, 0xba, 0x5d, 0xf3, 0x1b, 0x8e, 0xeb, 0x31, 0x8b, 0x1b, 0xed, 0xe1, 0x5e, 0x91, 0x00, 0x1c,
- 0xe3, 0xa0, 0x25, 0x18, 0x70, 0xa4, 0x66, 0x19, 0xe5, 0x07, 0xdb, 0x53, 0xfa, 0x64, 0x1e, 0x7f,
- 0x4a, 0xea, 0x92, 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x88, 0x80, 0x1e, 0x22, 0x97, 0xfe, 0xa4, 0xe9,
- 0xbe, 0x5c, 0xd1, 0x81, 0xd8, 0xc4, 0x45, 0xeb, 0x30, 0x14, 0xf9, 0x75, 0xe6, 0x83, 0x4b, 0xb9,
- 0xe4, 0x13, 0xf9, 0x31, 0x71, 0xd7, 0x14, 0x9a, 0xae, 0xf3, 0x50, 0x55, 0xb1, 0x4e, 0x07, 0xad,
- 0xf1, 0xf5, 0xce, 0xd2, 0x16, 0x91, 0x50, 0x24, 0x63, 0x3f, 0x93, 0x67, 0x2e, 0xc9, 0xd0, 0xcc,
- 0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x13, 0xcd, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0x9a,
- 0xf2, 0x29, 0x33, 0x49, 0x69, 0x39, 0x89, 0x80, 0xd3, 0x75, 0x58, 0x3c, 0x16, 0x51, 0x38, 0x75,
- 0x8a, 0x27, 0x5a, 0xe3, 0x72, 0x10, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb0, 0x93, 0x98, 0x8b, 0xf0,
- 0xa6, 0xa6, 0xf3, 0xbd, 0xfc, 0x75, 0x51, 0x1f, 0xe7, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x35,
- 0x2d, 0xbf, 0x39, 0x7d, 0x41, 0x85, 0x53, 0xa7, 0xdb, 0xd8, 0xeb, 0x26, 0x9e, 0xcb, 0x31, 0x43,
- 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0xbd, 0x01, 0xe3, 0x22, 0x58, 0x41, 0x3c, 0x4c, 0x67, 0x62,
- 0x9f, 0x26, 0x9c, 0x80, 0xe1, 0x14, 0x36, 0x4f, 0x74, 0xe6, 0xdc, 0xae, 0x13, 0x71, 0xf4, 0x5d,
- 0x77, 0xbd, 0xed, 0x70, 0xea, 0x2c, 0x3b, 0x1f, 0x44, 0xa2, 0xb3, 0x24, 0x14, 0x67, 0xd4, 0x40,
- 0x6b, 0x30, 0xde, 0x0c, 0x08, 0x69, 0xb0, 0x77, 0x92, 0xb8, 0xcf, 0x4a, 0x3c, 0x1c, 0x11, 0xed,
- 0x49, 0x39, 0x01, 0x3b, 0xc8, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x0e, 0x0c, 0xf8, 0x3b, 0x24, 0xd8,
- 0x22, 0x4e, 0x6d, 0xea, 0x5c, 0x1b, 0x4f, 0x3b, 0x71, 0xb9, 0xdd, 0x10, 0xb8, 0x09, 0x43, 0x24,
- 0x59, 0xdc, 0xd9, 0x10, 0x49, 0x36, 0x86, 0xfe, 0x0b, 0x0b, 0x4e, 0x49, 0xd5, 0x5e, 0xa5, 0x49,
- 0x47, 0x7d, 0xde, 0xf7, 0xc2, 0x28, 0xe0, 0x01, 0x74, 0x1e, 0xcd, 0x0f, 0x2a, 0xb3, 0x96, 0x53,
- 0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45, 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f,
- 0xa3, 0xd9, 0x70, 0xe9, 0xad, 0x85, 0xd5, 0xa9, 0xc7, 0x78, 0xf4, 0x1f, 0xba, 0x19, 0x2a, 0x49,
- 0x20, 0x4e, 0xe3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xa9, 0xc7, 0xdb, 0xa4, 0xc4, 0xf7, 0x6b, 0x37,
- 0x2a, 0xdc, 0x20, 0xf5, 0x46, 0x05, 0x17, 0xfc, 0x50, 0x26, 0x1b, 0xa3, 0xcf, 0xd9, 0x70, 0xea,
- 0x09, 0x2e, 0x73, 0x96, 0xc9, 0xc6, 0x58, 0x21, 0x8e, 0xe1, 0x68, 0x0b, 0xc6, 0x42, 0x43, 0x6c,
- 0x10, 0x4e, 0x9d, 0x67, 0x23, 0xf5, 0x44, 0xde, 0xa4, 0x19, 0xd8, 0x5a, 0x16, 0x20, 0x93, 0x0a,
- 0x4e, 0x92, 0xe5, 0xbb, 0x4b, 0x13, 0x5c, 0x84, 0x53, 0x4f, 0x76, 0xd8, 0x5d, 0x1a, 0xb2, 0xbe,
- 0xbb, 0x74, 0x1a, 0x38, 0x41, 0x13, 0xad, 0xeb, 0x6e, 0x8c, 0x17, 0xf2, 0x8d, 0x1b, 0x33, 0x1d,
- 0x18, 0x47, 0xf2, 0x9c, 0x17, 0xa7, 0xbf, 0x0f, 0x26, 0x52, 0x5c, 0xd8, 0x61, 0x7c, 0x3a, 0xa6,
- 0xb7, 0x61, 0xc4, 0x58, 0xe9, 0x0f, 0xd5, 0xe4, 0xe7, 0xcf, 0x06, 0x61, 0x50, 0x99, 0x62, 0xa0,
- 0x8b, 0xa6, 0x95, 0xcf, 0xa9, 0xa4, 0x95, 0xcf, 0x40, 0xd9, 0xaf, 0x19, 0x86, 0x3d, 0x6b, 0x19,
- 0xb1, 0x72, 0xf3, 0xce, 0xd5, 0xee, 0x1d, 0xcf, 0x34, 0xf5, 0x52, 0xb1, 0x6b, 0x73, 0xa1, 0x9e,
- 0xb6, 0x1a, 0xab, 0x2b, 0x30, 0xe1, 0xf9, 0x8c, 0xf5, 0x27, 0x35, 0xc9, 0xd7, 0x31, 0xf6, 0x6d,
- 0x50, 0x8f, 0xe5, 0x96, 0x40, 0xc0, 0xe9, 0x3a, 0xb4, 0x41, 0xce, 0x7f, 0x25, 0x55, 0x64, 0x9c,
- 0x3d, 0xc3, 0x02, 0x4a, 0x9f, 0x9c, 0xfc, 0x57, 0x38, 0x35, 0x9e, 0xff, 0xe4, 0xe4, 0x95, 0x92,
- 0x3c, 0x5e, 0x28, 0x79, 0x3c, 0xa6, 0x11, 0x6a, 0xfa, 0xb5, 0xe5, 0xb2, 0x78, 0x3d, 0x68, 0x51,
- 0xec, 0x6b, 0xcb, 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x23, 0x27, 0x6f, 0xf7,
- 0x2f, 0x97, 0xb5, 0x1c, 0xaa, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc4, 0x9f, 0x3e, 0xb9, 0x98, 0xc4,
- 0xbf, 0xff, 0x3e, 0x25, 0xfe, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xbb, 0x70, 0xdc, 0x78, 0xe6, 0x2a,
- 0x4f, 0x3c, 0xc8, 0x37, 0x06, 0x48, 0x20, 0xcf, 0x9d, 0x11, 0x9d, 0x3e, 0xbe, 0x9c, 0x45, 0x09,
- 0x67, 0x37, 0x80, 0xea, 0x30, 0x51, 0x4d, 0xb5, 0x3a, 0xd0, 0x7d, 0xab, 0x6a, 0x5d, 0xa4, 0x5b,
- 0x4c, 0x13, 0x46, 0xaf, 0xc2, 0xc0, 0xbb, 0x3e, 0x37, 0xdc, 0x13, 0x2f, 0x1e, 0x19, 0x05, 0x66,
- 0xe0, 0xad, 0x1b, 0x15, 0x56, 0x7e, 0xb0, 0x57, 0x1a, 0x2a, 0xfb, 0x35, 0xf9, 0x17, 0xab, 0x0a,
- 0xe8, 0xc7, 0x2c, 0x98, 0x4e, 0xbf, 0xa3, 0x55, 0xa7, 0x47, 0xba, 0xef, 0xb4, 0x2d, 0x1a, 0x9d,
- 0x5e, 0xcc, 0x25, 0x87, 0xdb, 0x34, 0x85, 0x3e, 0x4a, 0xf7, 0x53, 0xe8, 0xde, 0x23, 0x22, 0x01,
- 0xfd, 0xa3, 0xf1, 0x7e, 0xa2, 0xa5, 0x07, 0x7b, 0xa5, 0x31, 0x7e, 0xe0, 0xba, 0xf7, 0x54, 0xbc,
- 0x7d, 0x5e, 0x01, 0xfd, 0x20, 0x1c, 0x0f, 0xd2, 0x72, 0x6d, 0x22, 0x79, 0xfb, 0xa7, 0xbb, 0x39,
- 0xbc, 0x93, 0x13, 0x8e, 0xb3, 0x08, 0xe2, 0xec, 0x76, 0xec, 0xdf, 0xb3, 0x98, 0x3e, 0x43, 0x74,
- 0x8b, 0x84, 0xad, 0x7a, 0x74, 0x04, 0xc6, 0x72, 0x8b, 0x86, 0x3d, 0xc1, 0x7d, 0x5b, 0xbb, 0xfd,
- 0x2f, 0x16, 0xb3, 0x76, 0x3b, 0x42, 0xbf, 0xbd, 0xb7, 0x60, 0x20, 0x12, 0xad, 0x89, 0xae, 0xe7,
- 0x59, 0xe6, 0xc8, 0x4e, 0x31, 0x8b, 0x3f, 0xf5, 0x76, 0x92, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x47,
- 0x3e, 0x03, 0x12, 0x72, 0x04, 0x6a, 0xdb, 0x05, 0x53, 0x6d, 0x5b, 0xea, 0xf0, 0x05, 0x39, 0xea,
- 0xdb, 0xff, 0xc1, 0xec, 0x37, 0x93, 0x19, 0xbe, 0xdf, 0xcd, 0x2c, 0xed, 0x2f, 0x5a, 0x00, 0x71,
- 0x82, 0x93, 0x2e, 0x12, 0x4e, 0x5f, 0xa6, 0xaf, 0x25, 0x3f, 0xf2, 0xab, 0x7e, 0x5d, 0xa8, 0x8d,
- 0x4e, 0xc7, 0x9a, 0x63, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x1b, 0x95, 0x64, 0xc4, 0xe1, 0x62,
- 0x6c, 0xcb, 0x60, 0x44, 0x1b, 0xfe, 0x8a, 0x05, 0xc7, 0xb2, 0x9c, 0x40, 0xe8, 0xdb, 0x9b, 0x4b,
- 0x4f, 0x95, 0x09, 0xac, 0x9a, 0xcd, 0x9b, 0xa2, 0x1c, 0x2b, 0x8c, 0xae, 0x33, 0x79, 0x1f, 0x2e,
- 0xf9, 0xc6, 0x0d, 0x18, 0x29, 0x07, 0x44, 0xe3, 0x2f, 0x5e, 0x8f, 0xf3, 0x02, 0x0d, 0xce, 0x3d,
- 0x7b, 0xe8, 0xc8, 0x4a, 0xf6, 0x57, 0x0b, 0x70, 0x8c, 0x1b, 0x72, 0xcd, 0xee, 0xf8, 0x6e, 0xad,
- 0xec, 0xd7, 0x84, 0xeb, 0xee, 0xdb, 0x30, 0xdc, 0xd4, 0x44, 0xde, 0xed, 0x02, 0xc9, 0xeb, 0xa2,
- 0xf1, 0x58, 0x48, 0xa7, 0x97, 0x62, 0x83, 0x16, 0xaa, 0xc1, 0x30, 0xd9, 0x71, 0xab, 0xca, 0x1a,
- 0xa8, 0x70, 0xe8, 0x4b, 0x5a, 0xb5, 0xb2, 0xa8, 0xd1, 0xc1, 0x06, 0xd5, 0xae, 0xcd, 0xaf, 0x35,
- 0x16, 0xad, 0xa7, 0x83, 0x05, 0xd0, 0xcf, 0x5a, 0x70, 0x32, 0x27, 0xec, 0x3c, 0x6d, 0xee, 0x0e,
- 0x33, 0x99, 0x13, 0xcb, 0x56, 0x35, 0xc7, 0x0d, 0xe9, 0xb0, 0x80, 0xa2, 0x8f, 0x03, 0x34, 0xe3,
- 0x94, 0x9b, 0x1d, 0xe2, 0x73, 0x1b, 0x91, 0x7a, 0xb5, 0xa0, 0xab, 0x2a, 0x33, 0xa7, 0x46, 0xcb,
- 0xfe, 0x4a, 0x0f, 0xf4, 0x32, 0xc3, 0x2b, 0x54, 0x86, 0xfe, 0x2d, 0x1e, 0x13, 0xb0, 0xed, 0xbc,
- 0x51, 0x5c, 0x19, 0x64, 0x30, 0x9e, 0x37, 0xad, 0x14, 0x4b, 0x32, 0x68, 0x05, 0x26, 0x79, 0x3a,
- 0xd1, 0xfa, 0x02, 0xa9, 0x3b, 0xbb, 0x52, 0x9a, 0x5c, 0x60, 0x9f, 0xaa, 0xa4, 0xea, 0xcb, 0x69,
- 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xa3, 0xf4, 0x75, 0xef, 0xb7, 0x22, 0x49, 0x89, 0xe7, 0xef,
- 0x54, 0x0f, 0x9e, 0x35, 0x03, 0x8a, 0x13, 0xd8, 0xe8, 0x55, 0x18, 0x69, 0xa6, 0xe4, 0xe6, 0xbd,
- 0xb1, 0x80, 0xc9, 0x94, 0x95, 0x9b, 0xb8, 0xcc, 0x0f, 0xa4, 0xc5, 0xbc, 0x5e, 0xd6, 0xb6, 0x02,
- 0x12, 0x6e, 0xf9, 0xf5, 0x1a, 0xe3, 0x80, 0x7b, 0x35, 0x3f, 0x90, 0x04, 0x1c, 0xa7, 0x6a, 0x50,
- 0x2a, 0x1b, 0x8e, 0x5b, 0x6f, 0x05, 0x24, 0xa6, 0xd2, 0x67, 0x52, 0x59, 0x4a, 0xc0, 0x71, 0xaa,
- 0x46, 0x67, 0x85, 0x40, 0xff, 0x83, 0x51, 0x08, 0xd8, 0xbf, 0x5c, 0x00, 0x63, 0x6a, 0xbf, 0x87,
- 0xf3, 0x8a, 0xbe, 0x06, 0x3d, 0x9b, 0x41, 0xb3, 0x2a, 0x8c, 0x0c, 0x33, 0xbf, 0xec, 0x0a, 0x2e,
- 0xcf, 0xeb, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xe5, 0xc0, 0xa7, 0x97, 0x9c,
- 0x0c, 0x1b, 0xaa, 0xdc, 0xad, 0xfa, 0xe5, 0x1b, 0xbb, 0x4d, 0x80, 0x6d, 0xe1, 0x33, 0xc2, 0x29,
- 0x18, 0xf6, 0x78, 0x15, 0xf1, 0xc2, 0x96, 0x54, 0xd0, 0x25, 0x18, 0x12, 0xa9, 0x1e, 0x99, 0x57,
- 0x10, 0xdf, 0x4c, 0xcc, 0x7e, 0x70, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xe3, 0x05, 0x98, 0xcc,
- 0x70, 0xeb, 0xe4, 0xd7, 0xc8, 0xa6, 0x1b, 0x46, 0xc1, 0x6e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b,
- 0x0c, 0x7a, 0x56, 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0xb7, 0x29, 0x01, 0x3d, 0xdc, 0xe5, 0x44,
- 0xaf, 0xed, 0x56, 0x48, 0x64, 0x2c, 0x7f, 0x75, 0x6d, 0x33, 0x63, 0x03, 0x06, 0xa1, 0x4f, 0xc0,
- 0x4d, 0xa5, 0x41, 0xd7, 0x9e, 0x80, 0x5c, 0x87, 0xce, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f,
- 0x12, 0x0f, 0xc5, 0x38, 0xc6, 0x33, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0xa7, 0x72, 0x1d,
- 0xbd, 0x69, 0xd7, 0x1b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xcc, 0xe4, 0x71, 0x9d, 0x49, 0x73, 0x6b,
- 0x45, 0x94, 0x63, 0x85, 0x81, 0xce, 0x43, 0x2f, 0x93, 0xb5, 0x27, 0xd3, 0xbc, 0xe1, 0xb9, 0x05,
- 0x1e, 0x31, 0x93, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf6, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x3d,
- 0x79, 0xa1, 0xd0, 0xee, 0xfa, 0x7e, 0x1d, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0x12, 0x11,
- 0x3b, 0x35, 0x3f, 0xd4, 0x06, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0xbb, 0x81, 0xeb, 0x6d, 0x26, 0x2d,
- 0x54, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0x56, 0xf3, 0xfe, 0x07, 0x91, 0xd5, 0x5c, 0x5f, 0x01,
- 0x03, 0x1d, 0xd9, 0x93, 0x9f, 0x28, 0xc2, 0x18, 0x9e, 0x5b, 0xf8, 0x60, 0x22, 0xd6, 0xd3, 0x13,
- 0xf1, 0x20, 0x92, 0x7f, 0x1f, 0x6e, 0x36, 0x7e, 0xdb, 0x82, 0x31, 0x96, 0x70, 0x52, 0x44, 0x69,
- 0x71, 0x7d, 0xef, 0x08, 0x9e, 0x02, 0x8f, 0x41, 0x6f, 0x40, 0x1b, 0x15, 0x33, 0xa8, 0xf6, 0x38,
- 0xeb, 0x09, 0xe6, 0x30, 0x74, 0x1a, 0x7a, 0x58, 0x17, 0xe8, 0xe4, 0x0d, 0xf3, 0x23, 0x78, 0xc1,
- 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xbc, 0x48, 0x4c, 0x9a, 0x75, 0x97, 0x77, 0x3a, 0xb6, 0x84, 0x78,
- 0x7f, 0x84, 0x80, 0xc9, 0xec, 0xda, 0x7b, 0x8b, 0x17, 0x99, 0x4d, 0xb2, 0xfd, 0x33, 0xfb, 0x1f,
- 0x0a, 0x70, 0x36, 0xb3, 0x5e, 0xd7, 0xf1, 0x22, 0xdb, 0xd7, 0x7e, 0x98, 0xe9, 0xe9, 0x8a, 0x47,
- 0x68, 0xff, 0xdf, 0xd3, 0x2d, 0xf7, 0xdf, 0xdb, 0x45, 0x18, 0xc7, 0xcc, 0x21, 0x7b, 0x9f, 0x84,
- 0x71, 0xcc, 0xec, 0x5b, 0x8e, 0x98, 0xe0, 0x5f, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0x17, 0xe8,
- 0x39, 0xc3, 0x80, 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0xb3, 0x30, 0xd6,
- 0x70, 0x3d, 0x7a, 0xf8, 0xec, 0x9a, 0xac, 0xb8, 0x52, 0x91, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47,
- 0xae, 0x16, 0xe2, 0x91, 0x7f, 0xdd, 0xab, 0x87, 0xda, 0x75, 0x33, 0xa6, 0x95, 0x88, 0x1a, 0xc5,
- 0x8c, 0x70, 0x8f, 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbd, 0x9c, 0x68, 0x38, 0x5b, 0x46, 0x34, 0xfd,
- 0x2a, 0x8c, 0xdc, 0xb7, 0x6e, 0xc4, 0xfe, 0x56, 0x11, 0x1e, 0x69, 0xb3, 0xed, 0xf9, 0x59, 0x6f,
- 0xcc, 0x81, 0x76, 0xd6, 0xa7, 0xe6, 0xa1, 0x0c, 0xc7, 0x36, 0x5a, 0xf5, 0xfa, 0x2e, 0x73, 0x74,
- 0x23, 0x35, 0x89, 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xb6, 0x94, 0x81, 0x83, 0x33, 0x6b, 0xd2,
- 0x27, 0x16, 0xbd, 0x49, 0x76, 0x15, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x2b,
- 0x30, 0xe1, 0xec, 0x38, 0x2e, 0x4f, 0xef, 0x21, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x26,
- 0x11, 0x70, 0xba, 0x0e, 0x7a, 0x13, 0x90, 0x7f, 0x9b, 0x39, 0xcf, 0xd4, 0xae, 0x10, 0x4f, 0x28,
- 0xf3, 0xd9, 0xdc, 0x15, 0xe3, 0x23, 0xe1, 0x46, 0x0a, 0x03, 0x67, 0xd4, 0x4a, 0x04, 0x1b, 0xec,
- 0xcb, 0x0f, 0x36, 0xd8, 0xfe, 0x5c, 0xec, 0x98, 0x19, 0xf1, 0x1d, 0x18, 0x39, 0xac, 0xb5, 0xf7,
- 0x53, 0xd0, 0x1f, 0x88, 0x9c, 0xf3, 0x09, 0xaf, 0x72, 0x99, 0x91, 0x5b, 0xc2, 0xed, 0xff, 0xc7,
- 0x02, 0x25, 0x4b, 0x36, 0xe3, 0x8a, 0xbf, 0xca, 0x4c, 0xd7, 0xb9, 0x14, 0x5c, 0x0b, 0x25, 0x76,
- 0x5c, 0x33, 0x5d, 0x8f, 0x81, 0xd8, 0xc4, 0xe5, 0xcb, 0x2d, 0x8c, 0x23, 0x58, 0x18, 0x0f, 0x08,
- 0xa1, 0x35, 0x54, 0x18, 0xe8, 0x13, 0xd0, 0x5f, 0x73, 0x77, 0xdc, 0x50, 0xc8, 0xd1, 0x0e, 0xad,
- 0xb7, 0x8b, 0xbf, 0x6f, 0x81, 0x93, 0xc1, 0x92, 0x9e, 0xfd, 0x53, 0x16, 0x28, 0x75, 0xe7, 0x55,
- 0xe2, 0xd4, 0xa3, 0x2d, 0xf4, 0x06, 0x80, 0xa4, 0xa0, 0x64, 0x6f, 0xd2, 0x08, 0x0b, 0xb0, 0x82,
- 0x1c, 0x18, 0xff, 0xb0, 0x56, 0x07, 0xbd, 0x0e, 0x7d, 0x5b, 0x8c, 0x96, 0xf8, 0xb6, 0xf3, 0x4a,
- 0xd5, 0xc5, 0x4a, 0x0f, 0xf6, 0x4a, 0xc7, 0xcc, 0x36, 0xe5, 0x2d, 0xc6, 0x6b, 0xd9, 0x3f, 0x51,
- 0x88, 0xe7, 0xf4, 0xad, 0x96, 0x1f, 0x39, 0x47, 0xc0, 0x89, 0x5c, 0x31, 0x38, 0x91, 0x27, 0xda,
- 0xe9, 0x73, 0x59, 0x97, 0x72, 0x39, 0x90, 0x1b, 0x09, 0x0e, 0xe4, 0xc9, 0xce, 0xa4, 0xda, 0x73,
- 0x1e, 0xff, 0x93, 0x05, 0x13, 0x06, 0xfe, 0x11, 0x5c, 0x80, 0x4b, 0xe6, 0x05, 0xf8, 0x68, 0xc7,
- 0x6f, 0xc8, 0xb9, 0xf8, 0x7e, 0xb4, 0x98, 0xe8, 0x3b, 0xbb, 0xf0, 0xde, 0x85, 0x9e, 0x2d, 0x27,
- 0xa8, 0x89, 0x77, 0xfd, 0xc5, 0xae, 0xc6, 0x7a, 0xe6, 0xaa, 0x13, 0x08, 0x03, 0x8e, 0x67, 0xe5,
- 0xa8, 0xd3, 0xa2, 0x8e, 0xc6, 0x1b, 0xac, 0x29, 0x74, 0x19, 0xfa, 0xc2, 0xaa, 0xdf, 0x54, 0x7e,
- 0x80, 0x2c, 0x5d, 0x78, 0x85, 0x95, 0x1c, 0xec, 0x95, 0x90, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f,
- 0xbd, 0x0d, 0x23, 0xec, 0x97, 0xb2, 0xa6, 0x2c, 0xe6, 0x4b, 0x60, 0x2a, 0x3a, 0x22, 0x37, 0x35,
- 0x36, 0x8a, 0xb0, 0x49, 0x6a, 0x7a, 0x13, 0x06, 0xd5, 0x67, 0x3d, 0x54, 0x6d, 0xfd, 0xff, 0x59,
- 0x84, 0xc9, 0x8c, 0x35, 0x87, 0x42, 0x63, 0x26, 0x2e, 0x75, 0xb9, 0x54, 0xdf, 0xe3, 0x5c, 0x84,
- 0xec, 0x01, 0x58, 0x13, 0x6b, 0xab, 0xeb, 0x46, 0xd7, 0x43, 0x92, 0x6c, 0x94, 0x16, 0x75, 0x6e,
- 0x94, 0x36, 0x76, 0x64, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0x87, 0x3d, 0x70,
- 0x2c, 0xcb, 0xc4, 0x04, 0x7d, 0x0e, 0xfa, 0x98, 0xa3, 0x9a, 0x14, 0x9c, 0xbd, 0xd8, 0xad, 0x71,
- 0xca, 0x0c, 0xf3, 0x75, 0x13, 0xa1, 0x69, 0x67, 0xe4, 0x71, 0xc4, 0x0b, 0x3b, 0x0e, 0xb3, 0x68,
- 0x93, 0x85, 0x8c, 0x12, 0xb7, 0xa7, 0x3c, 0x3e, 0x3e, 0xd2, 0x75, 0x07, 0xc4, 0xfd, 0x1b, 0x26,
- 0x2c, 0xb5, 0x64, 0x71, 0x67, 0x4b, 0x2d, 0xd9, 0x32, 0x5a, 0x86, 0xbe, 0x2a, 0x37, 0x01, 0x2a,
- 0x76, 0x3e, 0xc2, 0xb8, 0xfd, 0x8f, 0x3a, 0x80, 0x85, 0xdd, 0x8f, 0x20, 0x30, 0xed, 0xc2, 0x90,
- 0x36, 0x30, 0x0f, 0x75, 0xf1, 0x6c, 0xd3, 0x8b, 0x4f, 0x1b, 0x82, 0x87, 0xba, 0x80, 0x7e, 0x46,
- 0xbb, 0xfb, 0xc5, 0x79, 0xf0, 0x61, 0x83, 0x77, 0x3a, 0x9d, 0x70, 0x1f, 0x4c, 0xec, 0x2b, 0xc6,
- 0x4b, 0x55, 0xcc, 0x98, 0xee, 0xb9, 0xa9, 0xa1, 0xcc, 0x0b, 0xbf, 0x7d, 0x1c, 0x77, 0xfb, 0x67,
- 0x2d, 0x48, 0x38, 0x78, 0x29, 0x71, 0xa7, 0x95, 0x2b, 0xee, 0x3c, 0x07, 0x3d, 0x81, 0x5f, 0x27,
- 0xc9, 0xd4, 0xfb, 0xd8, 0xaf, 0x13, 0xcc, 0x20, 0x14, 0x23, 0x8a, 0x85, 0x58, 0xc3, 0xfa, 0x03,
- 0x5d, 0x3c, 0xbd, 0x1f, 0x83, 0xde, 0x3a, 0xd9, 0x21, 0xf5, 0x64, 0x86, 0xd4, 0xeb, 0xb4, 0x10,
- 0x73, 0x98, 0xfd, 0xdb, 0x3d, 0x70, 0xa6, 0x6d, 0x64, 0x39, 0xca, 0x60, 0x6e, 0x3a, 0x11, 0xb9,
- 0xe3, 0xec, 0x26, 0x33, 0x03, 0x5e, 0xe1, 0xc5, 0x58, 0xc2, 0x99, 0xb3, 0x35, 0xcf, 0x94, 0x93,
- 0x10, 0x0e, 0x8b, 0x04, 0x39, 0x02, 0x6a, 0x0a, 0x1b, 0x8b, 0x0f, 0x42, 0xd8, 0xf8, 0x3c, 0x40,
- 0x18, 0xd6, 0xb9, 0x1d, 0x67, 0x4d, 0x78, 0x71, 0xc7, 0x19, 0x95, 0x2a, 0xd7, 0x05, 0x04, 0x6b,
- 0x58, 0x68, 0x01, 0xc6, 0x9b, 0x81, 0x1f, 0x71, 0x59, 0xfb, 0x02, 0x37, 0x75, 0xee, 0x35, 0x83,
- 0x7a, 0x95, 0x13, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x82, 0x21, 0x11, 0xe8, 0xab, 0xec, 0xfb, 0x75,
- 0x21, 0xde, 0x53, 0xd6, 0xbf, 0x95, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xe0, 0xf7, 0x67,
- 0x56, 0xe3, 0x42, 0x7c, 0x0d, 0x2f, 0x91, 0x14, 0x60, 0xa0, 0xab, 0xa4, 0x00, 0xb1, 0xc0, 0x73,
- 0xb0, 0x6b, 0x7d, 0x32, 0x74, 0x14, 0x11, 0x7e, 0xad, 0x07, 0x26, 0xc5, 0xc2, 0x79, 0xd8, 0xcb,
- 0x65, 0x3d, 0xbd, 0x5c, 0x1e, 0x84, 0x48, 0xf4, 0x83, 0x35, 0x73, 0xd4, 0x6b, 0xe6, 0x27, 0x2d,
- 0x30, 0x79, 0x48, 0xf4, 0x9f, 0xe5, 0xa6, 0x56, 0x7d, 0x29, 0x97, 0x27, 0x8d, 0x23, 0x86, 0xbf,
- 0xb7, 0x24, 0xab, 0xf6, 0xff, 0x65, 0xc1, 0xa3, 0x1d, 0x29, 0xa2, 0x45, 0x18, 0x64, 0x8c, 0xae,
- 0xf6, 0x2e, 0x7e, 0x52, 0xb9, 0x42, 0x48, 0x40, 0x0e, 0xdf, 0x1d, 0xd7, 0x44, 0x8b, 0xa9, 0x1c,
- 0xb6, 0x4f, 0x65, 0xe4, 0xb0, 0x3d, 0x6e, 0x0c, 0xcf, 0x7d, 0x26, 0xb1, 0xfd, 0x12, 0xbd, 0x71,
- 0x4c, 0x7f, 0xca, 0x8f, 0x18, 0xe2, 0x5c, 0x3b, 0x21, 0xce, 0x45, 0x26, 0xb6, 0x76, 0x87, 0xbc,
- 0x01, 0xe3, 0x2c, 0x02, 0x28, 0x73, 0xcc, 0x11, 0x8e, 0x98, 0x85, 0xd8, 0xf8, 0xfe, 0x7a, 0x02,
- 0x86, 0x53, 0xd8, 0xf6, 0xdf, 0x15, 0xa1, 0x8f, 0x6f, 0xbf, 0x23, 0x78, 0xf8, 0x3e, 0x03, 0x83,
- 0x6e, 0xa3, 0xd1, 0xe2, 0x69, 0x49, 0x7b, 0x63, 0x53, 0xee, 0x65, 0x59, 0x88, 0x63, 0x38, 0x5a,
- 0x12, 0x9a, 0x84, 0x36, 0x41, 0xc6, 0x79, 0xc7, 0x67, 0x16, 0x9c, 0xc8, 0xe1, 0x5c, 0x9c, 0xba,
- 0x67, 0x63, 0x9d, 0x03, 0xfa, 0x34, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x49, 0xcb, 0x44, 0x26, 0x8a,
- 0xa7, 0xdb, 0x50, 0xab, 0x28, 0x64, 0x4e, 0x33, 0x3e, 0x73, 0x14, 0x00, 0x6b, 0x14, 0xd1, 0x8c,
- 0x71, 0xd3, 0x4f, 0x27, 0xe6, 0x0e, 0x38, 0xd5, 0x78, 0xce, 0xa6, 0x5f, 0x86, 0x41, 0x45, 0xbc,
- 0x93, 0x5c, 0x71, 0x58, 0x67, 0xd8, 0x3e, 0x06, 0x63, 0x89, 0xbe, 0x1d, 0x4a, 0x2c, 0xf9, 0x3b,
- 0x16, 0x8c, 0xf1, 0xce, 0x2c, 0x7a, 0x3b, 0xe2, 0x36, 0xb8, 0x07, 0xc7, 0xea, 0x19, 0xa7, 0xb2,
- 0x98, 0xfe, 0xee, 0x4f, 0x71, 0x25, 0x86, 0xcc, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x05, 0xba, 0xe3,
- 0xe8, 0xa9, 0xeb, 0xd4, 0x45, 0x34, 0x91, 0x61, 0xbe, 0xdb, 0x78, 0x19, 0x56, 0x50, 0xfb, 0xaf,
- 0x2c, 0x98, 0xe0, 0x3d, 0xbf, 0x46, 0x76, 0xd5, 0xd9, 0xf4, 0x9d, 0xec, 0xbb, 0x48, 0x88, 0x5d,
- 0xc8, 0x49, 0x88, 0xad, 0x7f, 0x5a, 0xb1, 0xed, 0xa7, 0x7d, 0xd5, 0x02, 0xb1, 0x42, 0x8e, 0x40,
- 0xd2, 0xf2, 0x7d, 0xa6, 0xa4, 0x65, 0x3a, 0x7f, 0x13, 0xe4, 0x88, 0x58, 0xfe, 0xc5, 0x82, 0x71,
- 0x8e, 0x10, 0x5b, 0x41, 0x7c, 0x47, 0xe7, 0x61, 0xce, 0xfc, 0xa2, 0x4c, 0xb3, 0xd6, 0x6b, 0x64,
- 0x77, 0xcd, 0x2f, 0x3b, 0xd1, 0x56, 0xf6, 0x47, 0x19, 0x93, 0xd5, 0xd3, 0x76, 0xb2, 0x6a, 0x72,
- 0x03, 0x19, 0x89, 0x17, 0x3b, 0x08, 0x80, 0x0f, 0x9b, 0x78, 0xd1, 0xfe, 0x7b, 0x0b, 0x10, 0x6f,
- 0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5, 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58,
- 0x0f, 0x64, 0x78, 0x12, 0xa6, 0x2c, 0xc5, 0xce, 0xa6, 0x2c, 0x87, 0x18, 0xd1, 0xaf, 0xf6, 0x43,
- 0xd2, 0x15, 0x13, 0xdd, 0x84, 0xe1, 0xaa, 0xd3, 0x74, 0x6e, 0xbb, 0x75, 0x37, 0x72, 0x49, 0xd8,
- 0xce, 0xce, 0x6d, 0x5e, 0xc3, 0x13, 0xc6, 0x07, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x19, 0x80, 0x66,
- 0xe0, 0xee, 0xb8, 0x75, 0xb2, 0xc9, 0x04, 0x42, 0x2c, 0x7e, 0x11, 0x37, 0xba, 0x93, 0xa5, 0x58,
- 0xc3, 0xc8, 0x08, 0x1b, 0x52, 0x7c, 0xc8, 0x61, 0x43, 0xe0, 0xc8, 0xc2, 0x86, 0xf4, 0x1c, 0x2a,
- 0x6c, 0xc8, 0xc0, 0xa1, 0xc3, 0x86, 0xf4, 0x76, 0x15, 0x36, 0x04, 0xc3, 0x09, 0xc9, 0x7b, 0xd2,
- 0xff, 0x4b, 0x6e, 0x9d, 0x88, 0x07, 0x07, 0x0f, 0xba, 0x34, 0xbd, 0xbf, 0x57, 0x3a, 0x81, 0x33,
- 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x71, 0x98, 0x72, 0xea, 0x75, 0xff, 0x8e, 0x9a, 0xd4, 0xc5, 0xb0,
- 0xea, 0xd4, 0xb9, 0x72, 0xa9, 0x9f, 0x51, 0x3d, 0xbd, 0xbf, 0x57, 0x9a, 0x9a, 0xcd, 0xc1, 0xc1,
- 0xb9, 0xb5, 0xd1, 0x6b, 0x30, 0xd8, 0x0c, 0xfc, 0xea, 0x8a, 0xe6, 0x2f, 0x7e, 0x96, 0x0e, 0x60,
- 0x59, 0x16, 0x1e, 0xec, 0x95, 0x46, 0xd4, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x22, 0x72, 0x0c,
- 0x3d, 0xec, 0x88, 0x1c, 0xc3, 0x0f, 0x38, 0x22, 0x87, 0xbd, 0x0d, 0x93, 0x15, 0x12, 0xb8, 0x4e,
- 0xdd, 0xbd, 0x47, 0x79, 0x72, 0x79, 0x06, 0xae, 0xc1, 0x60, 0x90, 0x38, 0xf5, 0xbb, 0x0a, 0x2e,
- 0xae, 0xc9, 0x65, 0xe4, 0x29, 0x1f, 0x13, 0xb2, 0xff, 0xbd, 0x05, 0xfd, 0xc2, 0xbd, 0xf3, 0x08,
- 0x38, 0xd3, 0x59, 0x43, 0x25, 0x53, 0xca, 0x9e, 0x14, 0xd6, 0x99, 0x5c, 0x65, 0xcc, 0x72, 0x42,
- 0x19, 0xf3, 0x68, 0x3b, 0x22, 0xed, 0xd5, 0x30, 0xff, 0x75, 0x91, 0xbe, 0x10, 0x8c, 0x40, 0x03,
- 0x0f, 0x7f, 0x08, 0x56, 0xa1, 0x3f, 0x14, 0x8e, 0xee, 0x85, 0x7c, 0x5f, 0x9e, 0xe4, 0x24, 0xc6,
- 0x36, 0x90, 0xc2, 0xb5, 0x5d, 0x12, 0xc9, 0xf4, 0xa0, 0x2f, 0x3e, 0x44, 0x0f, 0xfa, 0x4e, 0xa1,
- 0x18, 0x7a, 0x1e, 0x44, 0x28, 0x06, 0xfb, 0x1b, 0xec, 0x76, 0xd6, 0xcb, 0x8f, 0x80, 0x71, 0xbb,
- 0x62, 0xde, 0xe3, 0x76, 0x9b, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xc0, 0xfd, 0x96, 0x05, 0x67, 0x32,
- 0xbe, 0x4a, 0xe3, 0xe6, 0x9e, 0x85, 0x01, 0xa7, 0x55, 0x73, 0xd5, 0x5e, 0xd6, 0xb4, 0xc5, 0xb3,
- 0xa2, 0x1c, 0x2b, 0x0c, 0x34, 0x0f, 0x13, 0xe4, 0x6e, 0xd3, 0xe5, 0x6a, 0x78, 0xdd, 0x74, 0xbc,
- 0xc8, 0x7d, 0x82, 0x17, 0x93, 0x40, 0x9c, 0xc6, 0x57, 0xe1, 0xdc, 0x8a, 0xb9, 0xe1, 0xdc, 0x7e,
- 0xdd, 0x82, 0x21, 0xe5, 0xea, 0xfd, 0xd0, 0x47, 0xfb, 0x0d, 0x73, 0xb4, 0x1f, 0x69, 0x33, 0xda,
- 0x39, 0xc3, 0xfc, 0x97, 0x05, 0xd5, 0xdf, 0xb2, 0x1f, 0x44, 0x5d, 0x70, 0x89, 0xf7, 0xef, 0xf6,
- 0x72, 0x09, 0x86, 0x9c, 0x66, 0x53, 0x02, 0xa4, 0xfd, 0x22, 0x4b, 0x15, 0x11, 0x17, 0x63, 0x1d,
- 0x47, 0x79, 0xe1, 0x14, 0x73, 0xbd, 0x70, 0x6a, 0x00, 0x91, 0x13, 0x6c, 0x92, 0x88, 0x96, 0x09,
- 0x73, 0xeb, 0xfc, 0xf3, 0xa6, 0x15, 0xb9, 0xf5, 0x19, 0xd7, 0x8b, 0xc2, 0x28, 0x98, 0x59, 0xf6,
- 0xa2, 0x1b, 0x01, 0x7f, 0xa6, 0x6a, 0x41, 0x13, 0x15, 0x2d, 0xac, 0xd1, 0x95, 0x61, 0x4d, 0x58,
- 0x1b, 0xbd, 0xa6, 0x21, 0xcc, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0x7e, 0x99, 0xdd, 0x3e, 0x6c, 0x4c,
- 0x0f, 0x17, 0x0c, 0xf0, 0x1f, 0x86, 0xd5, 0x6c, 0x30, 0x95, 0xf0, 0x82, 0x1e, 0x72, 0xb0, 0xfd,
- 0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0x59, 0xe3, 0xb8, 0x84, 0xe8, 0x93, 0x29, 0xe3, 0xa6, 0xe7, 0x3a,
- 0xdc, 0x1a, 0x87, 0x30, 0x67, 0x62, 0x79, 0xe3, 0x58, 0x56, 0xad, 0xe5, 0xb2, 0xd8, 0x17, 0x5a,
- 0xde, 0x38, 0x01, 0xc0, 0x31, 0x0e, 0x65, 0xd8, 0xd4, 0x9f, 0x70, 0x0a, 0xc5, 0xe1, 0xc5, 0x15,
- 0x76, 0x88, 0x35, 0x0c, 0x74, 0x51, 0x08, 0x2d, 0xb8, 0xee, 0xe1, 0x91, 0x84, 0xd0, 0x42, 0x0e,
- 0x97, 0x26, 0x69, 0xba, 0x04, 0x43, 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0xa9, 0xd3, 0x16, 0x7a, 0xe3,
- 0x88, 0xb8, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x8c, 0x85, 0x5c, 0x96, 0xa7, 0x92, 0x5a,
- 0x70, 0x99, 0xe8, 0xd3, 0xca, 0xc9, 0xde, 0x04, 0x1f, 0xb0, 0x22, 0x7e, 0x3a, 0xc9, 0xd0, 0x23,
- 0x49, 0x12, 0xe8, 0x75, 0x18, 0xad, 0xfb, 0x4e, 0x6d, 0xce, 0xa9, 0x3b, 0x5e, 0x95, 0x8d, 0xcf,
- 0x80, 0x11, 0x7f, 0x72, 0xf4, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0x20, 0xea, 0x25, 0x22, 0x11,
- 0x8b, 0xe3, 0x6d, 0x92, 0x70, 0x6a, 0x90, 0x7d, 0x15, 0x63, 0x10, 0xaf, 0xe7, 0xe0, 0xe0, 0xdc,
- 0xda, 0xe8, 0x32, 0x0c, 0xcb, 0xcf, 0xd7, 0x22, 0xf5, 0xc4, 0x0e, 0x4d, 0x1a, 0x0c, 0x1b, 0x98,
- 0x28, 0x84, 0xe3, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x55, 0x11, 0xbe, 0x82, 0x3b, 0x7f,
- 0x7f, 0x4c, 0x7a, 0x9a, 0x2e, 0x66, 0x21, 0x1d, 0xec, 0x95, 0x4e, 0x8b, 0x51, 0xcb, 0x84, 0xe3,
- 0x6c, 0xda, 0x68, 0x05, 0x26, 0xb9, 0x0d, 0xcc, 0xfc, 0x16, 0xa9, 0x6e, 0xcb, 0x0d, 0xc7, 0xb8,
- 0x46, 0xcd, 0xf1, 0xe7, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x54, 0xb3, 0x75, 0xbb,
- 0xee, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x4c, 0xc8, 0x66, 0x6b, 0xb5, 0x80, 0x84, 0xdc, 0x37, 0x98,
- 0x5d, 0xbd, 0x32, 0xba, 0x52, 0x39, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe0, 0x78, 0x62, 0x21,
- 0x88, 0x30, 0x29, 0xa3, 0xf9, 0x29, 0xad, 0x2a, 0x59, 0x15, 0x44, 0xc4, 0xa1, 0x2c, 0x10, 0xce,
- 0x6e, 0x02, 0xbd, 0x02, 0xe0, 0x36, 0x97, 0x9c, 0x86, 0x5b, 0xa7, 0xcf, 0xd1, 0x49, 0xb6, 0x46,
- 0xe8, 0xd3, 0x04, 0x96, 0xcb, 0xb2, 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75,
- 0x18, 0x15, 0xff, 0x76, 0xc5, 0x94, 0x4e, 0xa8, 0xec, 0xa7, 0xa3, 0xb2, 0x86, 0x9a, 0xc7, 0x44,
- 0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70, 0x46, 0xa6, 0x5e, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xe5,
- 0x91, 0x1a, 0xe0, 0x3e, 0x45, 0xb3, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39,
- 0xf7, 0x18, 0x3f, 0x1e, 0x47, 0xf1, 0xbc, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb,
- 0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0, 0x47, 0xb9, 0xb3, 0x7c, 0xfb, 0x15, 0x9d, 0x09, 0xc7, 0xd9,
- 0x74, 0xd1, 0x32, 0x4c, 0x46, 0xbc, 0x60, 0xc1, 0x0d, 0x79, 0x9a, 0x1a, 0xfa, 0xec, 0x3b, 0xc9,
- 0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x5a, 0x1a, 0x8c, 0xb3, 0xea, 0xbc, 0x37, 0x03, 0xd0, 0x6f, 0x5a,
- 0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0xcf, 0xc0, 0xb0, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07,
- 0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8, 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x66, 0x04, 0xb9,
- 0xb8, 0xd8, 0x1d, 0x53, 0xd4, 0xbd, 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x40, 0xb5,
- 0xee, 0x12, 0x2f, 0x5a, 0x2e, 0xb7, 0x0b, 0xae, 0x3a, 0x2f, 0x70, 0xc4, 0x56, 0x14, 0xd9, 0xa5,
- 0x78, 0x19, 0x56, 0x14, 0xec, 0xcb, 0x30, 0x54, 0xa9, 0x13, 0xd2, 0xe4, 0x7e, 0x5c, 0xe8, 0x29,
- 0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5, 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0xff,
- 0xb8, 0x00, 0xa5, 0x0e, 0x49, 0xce, 0x12, 0xfa, 0x36, 0xab, 0x2b, 0x7d, 0xdb, 0x2c, 0x8c, 0xc5,
- 0xff, 0x74, 0x51, 0x9e, 0x32, 0x86, 0xbe, 0x69, 0x82, 0x71, 0x12, 0xbf, 0x6b, 0xbf, 0x16, 0x5d,
- 0x65, 0xd7, 0xd3, 0xd1, 0x33, 0xcb, 0x50, 0xd5, 0xf7, 0x76, 0xff, 0xf6, 0xce, 0x55, 0xbb, 0xda,
- 0xdf, 0x28, 0xc0, 0x71, 0x35, 0x84, 0xdf, 0xbb, 0x03, 0xb7, 0x9e, 0x1e, 0xb8, 0x07, 0xa0, 0xb4,
- 0xb6, 0x6f, 0x40, 0x1f, 0x8f, 0xf8, 0xda, 0x05, 0xcf, 0xff, 0x98, 0x19, 0x7c, 0x5f, 0xb1, 0x99,
- 0x46, 0x00, 0xfe, 0x1f, 0xb3, 0x60, 0x2c, 0xe1, 0x20, 0x89, 0xb0, 0xe6, 0x45, 0x7f, 0x3f, 0x7c,
- 0x79, 0x16, 0xc7, 0x7f, 0x0e, 0x7a, 0xb6, 0x7c, 0x65, 0xa4, 0xac, 0x30, 0xae, 0xfa, 0x61, 0x84,
- 0x19, 0xc4, 0xfe, 0x6b, 0x0b, 0x7a, 0xd7, 0x1c, 0xd7, 0x8b, 0xa4, 0xf6, 0xc3, 0xca, 0xd1, 0x7e,
- 0x74, 0xf3, 0x5d, 0xe8, 0x25, 0xe8, 0x23, 0x1b, 0x1b, 0xa4, 0x1a, 0x89, 0x59, 0x95, 0xd1, 0x34,
- 0xfa, 0x16, 0x59, 0x29, 0x65, 0x42, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x16, 0x0c, 0x46,
- 0x6e, 0x83, 0xcc, 0xd6, 0x6a, 0xc2, 0x26, 0xe0, 0x3e, 0x42, 0xc0, 0xac, 0x49, 0x02, 0x38, 0xa6,
- 0x65, 0x7f, 0xb9, 0x00, 0x10, 0x47, 0x98, 0xeb, 0xf4, 0x89, 0x73, 0x29, 0x6d, 0xf1, 0xf9, 0x0c,
- 0x6d, 0x31, 0x8a, 0x09, 0x66, 0xa8, 0x8a, 0xd5, 0x30, 0x15, 0xbb, 0x1a, 0xa6, 0x9e, 0xc3, 0x0c,
- 0xd3, 0x3c, 0x4c, 0xc4, 0x11, 0xf2, 0xcc, 0x00, 0xa1, 0xec, 0xfe, 0x5e, 0x4b, 0x02, 0x71, 0x1a,
- 0xdf, 0x26, 0x70, 0x4e, 0x05, 0x0a, 0x13, 0x77, 0x21, 0x73, 0x25, 0xd0, 0xb5, 0xef, 0x1d, 0xc6,
- 0x29, 0x56, 0x87, 0x17, 0x72, 0xd5, 0xe1, 0xbf, 0x60, 0xc1, 0xb1, 0x64, 0x3b, 0xcc, 0xef, 0xfe,
- 0x8b, 0x16, 0x1c, 0x8f, 0x73, 0xfc, 0xa4, 0x4d, 0x10, 0x5e, 0x6c, 0x1b, 0xfc, 0x2c, 0xa7, 0xc7,
- 0x71, 0xd8, 0x96, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xdf, 0xf5, 0xc0, 0x54, 0x5e, 0xd4,
- 0x34, 0xe6, 0x69, 0xe4, 0xdc, 0xad, 0x6c, 0x93, 0x3b, 0xc2, 0x9f, 0x23, 0xf6, 0x34, 0xe2, 0xc5,
- 0x58, 0xc2, 0x93, 0x69, 0x9d, 0x0a, 0x5d, 0xa6, 0x75, 0xda, 0x82, 0x89, 0x3b, 0x5b, 0xc4, 0x5b,
- 0xf7, 0x42, 0x27, 0x72, 0xc3, 0x0d, 0x97, 0x29, 0xd0, 0xf9, 0xba, 0x79, 0x45, 0x7a, 0x5d, 0xdc,
- 0x4a, 0x22, 0x1c, 0xec, 0x95, 0xce, 0x18, 0x05, 0x71, 0x97, 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3,
- 0x59, 0xb1, 0x7a, 0x1e, 0x72, 0x56, 0xac, 0x86, 0x2b, 0xcc, 0x6e, 0xa4, 0x1b, 0x09, 0x7b, 0xb6,
- 0xae, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x4f, 0x01, 0xd2, 0xd3, 0x1a, 0x1a, 0x41, 0x6b, 0x9f, 0xdb,
- 0xdf, 0x2b, 0xa1, 0xd5, 0x14, 0xf4, 0x60, 0xaf, 0x34, 0x49, 0x4b, 0x97, 0x3d, 0xfa, 0xfc, 0x8d,
- 0x23, 0xfd, 0x65, 0x10, 0x42, 0xb7, 0x60, 0x9c, 0x96, 0xb2, 0x1d, 0x25, 0x23, 0xe2, 0xf2, 0x27,
- 0xeb, 0x33, 0xfb, 0x7b, 0xa5, 0xf1, 0xd5, 0x04, 0x2c, 0x8f, 0x74, 0x8a, 0x48, 0x46, 0x72, 0xac,
- 0x81, 0x6e, 0x93, 0x63, 0xd9, 0x5f, 0xb4, 0xe0, 0x14, 0xbd, 0xe0, 0x6a, 0xd7, 0x73, 0xb4, 0xe8,
- 0x4e, 0xd3, 0xe5, 0x7a, 0x1a, 0x71, 0xd5, 0x30, 0x59, 0x5d, 0x79, 0x99, 0x6b, 0x69, 0x14, 0x94,
- 0x9e, 0xf0, 0xdb, 0xae, 0x57, 0x4b, 0x9e, 0xf0, 0xd7, 0x5c, 0xaf, 0x86, 0x19, 0x44, 0x5d, 0x59,
- 0xc5, 0xdc, 0x08, 0xfb, 0x5f, 0xa3, 0x7b, 0x95, 0xf6, 0xe5, 0x3b, 0xda, 0x0d, 0xf4, 0x8c, 0xae,
- 0x53, 0x15, 0xe6, 0x93, 0xb9, 0xfa, 0xd4, 0x2f, 0x58, 0x20, 0xbc, 0xdf, 0xbb, 0xb8, 0x93, 0xdf,
- 0x86, 0xe1, 0x9d, 0x74, 0xca, 0xd7, 0x73, 0xf9, 0xe1, 0x00, 0x44, 0xa2, 0x57, 0xc5, 0xa2, 0x1b,
- 0xe9, 0x5d, 0x0d, 0x5a, 0x76, 0x0d, 0x04, 0x74, 0x81, 0x30, 0xad, 0x46, 0xe7, 0xde, 0x3c, 0x0f,
- 0x50, 0x63, 0xb8, 0x2c, 0x0f, 0x7c, 0xc1, 0xe4, 0xb8, 0x16, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xaf,
- 0x16, 0x61, 0x48, 0xa6, 0x18, 0x6d, 0x79, 0xdd, 0xc8, 0x1e, 0x75, 0xc6, 0xa9, 0xd0, 0x91, 0x71,
- 0x7a, 0x07, 0x26, 0x02, 0x52, 0x6d, 0x05, 0xa1, 0xbb, 0x43, 0x24, 0x58, 0x6c, 0x92, 0x19, 0x9e,
- 0xe0, 0x21, 0x01, 0x3c, 0x60, 0x21, 0xb2, 0x12, 0x85, 0x4c, 0x69, 0x9c, 0x26, 0x84, 0x2e, 0xc2,
- 0x20, 0x13, 0xbd, 0x97, 0x63, 0x81, 0xb0, 0x12, 0x7c, 0xad, 0x48, 0x00, 0x8e, 0x71, 0xd8, 0xe3,
- 0xa0, 0x75, 0x9b, 0xa1, 0x27, 0x3c, 0xc1, 0x2b, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x38, 0x8c, 0xf3,
- 0x7a, 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x4a, 0xb0, 0x57, 0x85, 0xd7, 0x19, 0x5f, 0x49, 0xc0, 0x0e,
- 0xf6, 0x4a, 0xc7, 0x92, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf2, 0x8f, 0x37, 0x42, 0xef, 0x8c,
- 0x94, 0xc1, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0xcf, 0x16, 0x4c, 0x68, 0x53, 0xd5, 0x75, 0x8e,
- 0x0d, 0x63, 0x90, 0x0a, 0x5d, 0x0c, 0xd2, 0xe1, 0xa2, 0x3d, 0x64, 0xce, 0x70, 0xcf, 0x03, 0x9a,
- 0x61, 0xfb, 0x33, 0x80, 0xd2, 0xf9, 0x6b, 0xd1, 0x9b, 0xdc, 0x90, 0xdf, 0x0d, 0x48, 0xad, 0x9d,
- 0xc2, 0x5f, 0x8f, 0x9c, 0x23, 0x3d, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x3f, 0xde, 0x03, 0xe3,
- 0xc9, 0x58, 0x1d, 0xe8, 0x2a, 0xf4, 0x71, 0x2e, 0x5d, 0x90, 0x6f, 0x63, 0x4f, 0xa6, 0x45, 0xf8,
- 0xe0, 0xf9, 0x6f, 0x38, 0x77, 0x2f, 0xea, 0xa3, 0x77, 0x60, 0xa8, 0xe6, 0xdf, 0xf1, 0xee, 0x38,
- 0x41, 0x6d, 0xb6, 0xbc, 0x2c, 0x4e, 0x88, 0x4c, 0x01, 0xd4, 0x42, 0x8c, 0xa6, 0x47, 0x0d, 0x61,
- 0xb6, 0x13, 0x31, 0x08, 0xeb, 0xe4, 0xd0, 0x1a, 0x4b, 0xc9, 0xb4, 0xe1, 0x6e, 0xae, 0x38, 0xcd,
- 0x76, 0x5e, 0x5d, 0xf3, 0x12, 0x49, 0xa3, 0x3c, 0x22, 0xf2, 0x36, 0x71, 0x00, 0x8e, 0x09, 0xa1,
- 0xcf, 0xc1, 0x64, 0x98, 0xa3, 0x12, 0xcb, 0x4b, 0x67, 0xde, 0x4e, 0x4b, 0xc4, 0x85, 0x29, 0x59,
- 0xca, 0xb3, 0xac, 0x66, 0xd0, 0x5d, 0x40, 0x42, 0xf4, 0xbc, 0x16, 0xb4, 0xc2, 0x68, 0xae, 0xe5,
- 0xd5, 0xea, 0x32, 0x65, 0xd3, 0x87, 0xb3, 0xe5, 0x04, 0x49, 0x6c, 0xad, 0x6d, 0x16, 0x12, 0x38,
- 0x8d, 0x81, 0x33, 0xda, 0xb0, 0xbf, 0xd0, 0x03, 0xd3, 0x32, 0x61, 0x74, 0x86, 0xf7, 0xca, 0xe7,
- 0xad, 0x84, 0xfb, 0xca, 0x2b, 0xf9, 0x07, 0xfd, 0x43, 0x73, 0x62, 0xf9, 0x52, 0xda, 0x89, 0xe5,
- 0xb5, 0x43, 0x76, 0xe3, 0x81, 0xb9, 0xb2, 0x7c, 0xcf, 0xfa, 0x9f, 0xec, 0x1f, 0x03, 0xe3, 0x6a,
- 0x46, 0x98, 0xc7, 0x5b, 0x2f, 0x4b, 0xd5, 0x51, 0xce, 0xf3, 0xff, 0xaa, 0xc0, 0x31, 0x2e, 0xfb,
- 0x61, 0x19, 0x95, 0x9d, 0x9d, 0xb3, 0x8a, 0x0e, 0xa5, 0x49, 0x1a, 0xcd, 0x68, 0x77, 0xc1, 0x0d,
- 0x44, 0x8f, 0x33, 0x69, 0x2e, 0x0a, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x0e, 0xda, 0x81, 0x89,
- 0x4d, 0x16, 0xf1, 0x49, 0xcb, 0xdd, 0x2c, 0xce, 0x85, 0xcc, 0x7d, 0x7b, 0x65, 0x7e, 0x31, 0x3f,
- 0xd1, 0x33, 0x7f, 0xfc, 0xa5, 0x50, 0x70, 0xba, 0x09, 0xba, 0x35, 0x8e, 0x39, 0x77, 0xc2, 0xc5,
- 0xba, 0x13, 0x46, 0x6e, 0x75, 0xae, 0xee, 0x57, 0xb7, 0x2b, 0x91, 0x1f, 0xc8, 0x04, 0x8f, 0x99,
- 0x6f, 0xaf, 0xd9, 0x5b, 0x95, 0x14, 0xbe, 0xd1, 0xfc, 0xd4, 0xfe, 0x5e, 0xe9, 0x58, 0x16, 0x16,
- 0xce, 0x6c, 0x0b, 0xad, 0x42, 0xff, 0xa6, 0x1b, 0x61, 0xd2, 0xf4, 0xc5, 0x69, 0x91, 0x79, 0x14,
- 0x5e, 0xe1, 0x28, 0x46, 0x4b, 0x2c, 0x22, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa9, 0x2e, 0x81,
- 0xbe, 0x7c, 0x01, 0x6c, 0xda, 0xf6, 0x2e, 0xf3, 0x1a, 0x78, 0x1d, 0x8a, 0xde, 0x46, 0xd8, 0x2e,
- 0x16, 0xcf, 0xea, 0x92, 0x21, 0x3f, 0x9b, 0xeb, 0xa7, 0x4f, 0xe3, 0xd5, 0xa5, 0x0a, 0xa6, 0x15,
- 0x99, 0xdb, 0x6b, 0x58, 0x0d, 0x5d, 0x91, 0x2c, 0x2a, 0xd3, 0x0b, 0x78, 0xb9, 0x32, 0x5f, 0x59,
- 0x36, 0x68, 0xb0, 0xa8, 0x86, 0xac, 0x18, 0xf3, 0xea, 0xe8, 0x26, 0x0c, 0x6e, 0xf2, 0x83, 0x6f,
- 0x23, 0x14, 0x49, 0xe3, 0x33, 0x2f, 0xa3, 0x2b, 0x12, 0xc9, 0xa0, 0xc7, 0xae, 0x0c, 0x05, 0xc2,
- 0x31, 0x29, 0xf4, 0x05, 0x0b, 0x8e, 0x27, 0xb3, 0xee, 0x33, 0x67, 0x35, 0x61, 0xa6, 0x96, 0xe9,
- 0x00, 0x50, 0xce, 0xaa, 0x60, 0x34, 0xc8, 0xd4, 0x2f, 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0,
- 0xc1, 0xed, 0x5a, 0xbb, 0xfc, 0x42, 0x89, 0xc0, 0x44, 0x7c, 0xa0, 0xf1, 0xdc, 0x02, 0xa6, 0x15,
- 0xd1, 0x1a, 0xc0, 0x46, 0x9d, 0x88, 0x88, 0x8f, 0xc2, 0x28, 0x2a, 0xf3, 0xf6, 0x5f, 0x52, 0x58,
- 0x82, 0x0e, 0x7b, 0x89, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xd5, 0xf5, 0x6a, 0x24, 0x60,
- 0xca, 0xad, 0x9c, 0xa5, 0x34, 0xcf, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, 0x22,
- 0xcd, 0xad, 0x8d, 0xb0, 0x5d, 0x26, 0x8b, 0x79, 0xd2, 0xdc, 0x4a, 0x2c, 0x28, 0x4e, 0x8b, 0x95,
- 0x63, 0x41, 0x81, 0x6e, 0x99, 0x0d, 0xba, 0x81, 0x48, 0x30, 0x35, 0x96, 0xbf, 0x65, 0x96, 0x38,
- 0x4a, 0x7a, 0xcb, 0x08, 0x00, 0x96, 0x44, 0xd0, 0xa7, 0x4d, 0x6e, 0x67, 0x9c, 0xd1, 0x7c, 0xa6,
- 0x03, 0xb7, 0x63, 0xd0, 0x6d, 0xcf, 0xef, 0xbc, 0x02, 0x85, 0x8d, 0x2a, 0x53, 0x8a, 0xe5, 0xe8,
- 0x0c, 0x96, 0xe6, 0x0d, 0x6a, 0x2c, 0x32, 0xfc, 0xd2, 0x3c, 0x2e, 0x6c, 0x54, 0xe9, 0xd2, 0x77,
- 0xee, 0xb5, 0x02, 0xb2, 0xe4, 0xd6, 0x89, 0xc8, 0x6a, 0x91, 0xb9, 0xf4, 0x67, 0x25, 0x52, 0x7a,
- 0xe9, 0x2b, 0x10, 0x8e, 0x49, 0x51, 0xba, 0x31, 0x0f, 0x36, 0x99, 0x4f, 0x57, 0xb1, 0x5a, 0x69,
- 0xba, 0x99, 0x5c, 0xd8, 0x36, 0x8c, 0xec, 0x84, 0xcd, 0x2d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb,
- 0x89, 0x54, 0x71, 0x53, 0x20, 0xba, 0x41, 0xd4, 0x72, 0xea, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e,
- 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08,
- 0x19, 0x11, 0xe7, 0xf8, 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0x89, 0x0e,
- 0x83, 0x9d, 0xea, 0x6f, 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb9, 0xe5, 0x47,
- 0xbe, 0x97, 0xb8, 0xe4, 0x4e, 0xe6, 0x5f, 0x34, 0xe5, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85,
- 0x33, 0xdb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0x01, 0x45, 0xe6, 0x8d, 0xa7, 0x72, 0x02, 0x6b, 0xa6,
- 0xc3, 0x07, 0xf2, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0x42, 0x35, 0x18, 0x6d, 0x1a, 0x11, 0x67, 0x59,
- 0x06, 0x91, 0x1c, 0xbe, 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c,
- 0xf7, 0xb8, 0xab, 0x1f, 0x4b, 0x30, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00,
- 0x4b, 0x22, 0x74, 0x34, 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x3c, 0x3d, 0x79, 0x0a, 0xf6, 0x2c, 0x35,
- 0x91, 0x0c, 0xb3, 0x2e, 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xce, 0x3f, 0xc9,
- 0x93, 0xd7, 0x1d, 0x3b, 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0x72, 0x8c,
- 0xe4, 0xf4, 0x4b, 0x85, 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34,
- 0xdd, 0xd9, 0x36, 0x57, 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0,
- 0x6c, 0xfb, 0x7d, 0x1b, 0xeb, 0xd0, 0xca, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31,
- 0x56, 0xd7, 0x01, 0x87, 0xaf, 0xc0, 0x84, 0x72, 0x47, 0xac, 0xbb, 0xd5, 0x5d, 0x2d, 0xb1, 0xa8,
- 0x0a, 0xcd, 0x53, 0x49, 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x0b, 0x63, 0x46, 0xe1, 0xf2, 0x82, 0x78,
- 0xfe, 0xc7, 0xd9, 0x31, 0x4c, 0x30, 0x4e, 0xe2, 0xdb, 0xbf, 0x66, 0xc1, 0xc9, 0x9c, 0x3c, 0xf3,
- 0x5d, 0xc7, 0xd3, 0xdd, 0x80, 0xb1, 0xa6, 0x59, 0xb5, 0x43, 0x08, 0x70, 0x23, 0x9b, 0xbd, 0xea,
- 0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x5f, 0x29, 0xc0, 0x99, 0xb6, 0xf6, 0xf5, 0x08, 0xc3, 0x89,
- 0xcd, 0x46, 0xe8, 0xcc, 0x07, 0xa4, 0x46, 0xbc, 0xc8, 0x75, 0xea, 0x95, 0x26, 0xa9, 0x6a, 0x5a,
- 0x50, 0x66, 0xa8, 0x7e, 0x65, 0xa5, 0x32, 0x9b, 0xc6, 0xc0, 0x39, 0x35, 0xd1, 0x12, 0xa0, 0x34,
- 0x44, 0xcc, 0x30, 0x7b, 0xe2, 0xa6, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x65, 0x18, 0x51, 0x76, 0xfb,
- 0xda, 0x8c, 0xb3, 0x0b, 0x02, 0xeb, 0x00, 0x6c, 0xe2, 0xa1, 0x4b, 0x3c, 0x6d, 0x92, 0x48, 0xb0,
- 0x25, 0x54, 0xa6, 0x63, 0x32, 0x27, 0x92, 0x28, 0xc6, 0x3a, 0xce, 0xdc, 0xe5, 0x3f, 0xfd, 0xf6,
- 0xd9, 0x0f, 0xfd, 0xc5, 0xb7, 0xcf, 0x7e, 0xe8, 0xaf, 0xbe, 0x7d, 0xf6, 0x43, 0x3f, 0xb4, 0x7f,
- 0xd6, 0xfa, 0xd3, 0xfd, 0xb3, 0xd6, 0x5f, 0xec, 0x9f, 0xb5, 0xfe, 0x6a, 0xff, 0xac, 0xf5, 0xff,
- 0xee, 0x9f, 0xb5, 0xbe, 0xfc, 0xb7, 0x67, 0x3f, 0xf4, 0x36, 0x8a, 0x23, 0x54, 0x5f, 0xa4, 0xb3,
- 0x73, 0x71, 0xe7, 0xd2, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x60, 0x45, 0x7a, 0xd6, 0xa3, 0x24,
- 0x01, 0x00,
+ // 16206 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9,
+ 0x75, 0x30, 0xc6, 0xea, 0x9e, 0xf3, 0xcd, 0x9d, 0xb8, 0x06, 0xb3, 0x00, 0x1a, 0x5b, 0xbb, 0x8b,
+ 0xc5, 0x5e, 0x03, 0x62, 0x0f, 0x2e, 0xb8, 0xbb, 0x5c, 0xed, 0x9c, 0x40, 0x2f, 0x30, 0x83, 0xde,
+ 0xec, 0x01, 0x40, 0x2e, 0x97, 0x14, 0x0b, 0xdd, 0x39, 0x33, 0xc5, 0xe9, 0xae, 0xea, 0xad, 0xaa,
+ 0x1e, 0x60, 0x60, 0x2a, 0x24, 0x51, 0x16, 0x25, 0x52, 0x72, 0x04, 0x43, 0x21, 0x59, 0x0e, 0x4a,
+ 0xa1, 0x1f, 0xba, 0x65, 0x5a, 0xb2, 0x68, 0xc9, 0x92, 0x2c, 0xea, 0xb2, 0x2d, 0x47, 0xc8, 0xfe,
+ 0x21, 0x4b, 0x8a, 0x30, 0xa9, 0xb0, 0xc2, 0x23, 0x73, 0x6c, 0x87, 0x42, 0x3f, 0x2c, 0x29, 0x64,
+ 0xff, 0xb0, 0x27, 0xf4, 0x7d, 0xfc, 0x22, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x07, 0x0b, 0x0c, 0x97,
+ 0x8c, 0xfd, 0xd7, 0x9d, 0xef, 0xe5, 0xcb, 0xac, 0x3c, 0x5f, 0xbe, 0x13, 0xec, 0xad, 0x4b, 0xe1,
+ 0xac, 0xeb, 0x5f, 0x70, 0x5a, 0xee, 0x85, 0x9a, 0x1f, 0x90, 0x0b, 0xdb, 0x17, 0x2f, 0x6c, 0x10,
+ 0x8f, 0x04, 0x4e, 0x44, 0xea, 0xb3, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xce, 0xac, 0xd3, 0x72,
+ 0x67, 0x29, 0xce, 0xec, 0xf6, 0xc5, 0x99, 0xe7, 0x36, 0xdc, 0x68, 0xb3, 0x7d, 0x7b, 0xb6, 0xe6,
+ 0x37, 0x2f, 0x6c, 0xf8, 0x1b, 0xfe, 0x05, 0x86, 0x7a, 0xbb, 0xbd, 0xce, 0xfe, 0xb1, 0x3f, 0xec,
+ 0x17, 0x27, 0x31, 0xf3, 0x62, 0xdc, 0x4c, 0xd3, 0xa9, 0x6d, 0xba, 0x1e, 0x09, 0x76, 0x2e, 0xb4,
+ 0xb6, 0x36, 0x58, 0xbb, 0x01, 0x09, 0xfd, 0x76, 0x50, 0x23, 0xc9, 0x86, 0x3b, 0xd6, 0x0a, 0x2f,
+ 0x34, 0x49, 0xe4, 0x64, 0x74, 0x77, 0xe6, 0x42, 0x5e, 0xad, 0xa0, 0xed, 0x45, 0x6e, 0x33, 0xdd,
+ 0xcc, 0x47, 0xba, 0x55, 0x08, 0x6b, 0x9b, 0xa4, 0xe9, 0xa4, 0xea, 0xbd, 0x90, 0x57, 0xaf, 0x1d,
+ 0xb9, 0x8d, 0x0b, 0xae, 0x17, 0x85, 0x51, 0x90, 0xac, 0x64, 0x7f, 0xd3, 0x82, 0xb3, 0x73, 0xb7,
+ 0xaa, 0x4b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x7c, 0xc3, 0xaf, 0x6d, 0x55, 0x23, 0x3f, 0x20, 0x37,
+ 0xfd, 0x46, 0xbb, 0x49, 0xaa, 0x6c, 0x20, 0xd0, 0xb3, 0x30, 0xb4, 0xcd, 0xfe, 0x97, 0x17, 0xa7,
+ 0xad, 0xb3, 0xd6, 0xf9, 0xe1, 0xf9, 0xc9, 0xbf, 0xd8, 0x2d, 0x7d, 0x68, 0x6f, 0xb7, 0x34, 0x74,
+ 0x53, 0x94, 0x63, 0x85, 0x81, 0xce, 0xc1, 0xc0, 0x7a, 0xb8, 0xb6, 0xd3, 0x22, 0xd3, 0x05, 0x86,
+ 0x3b, 0x2e, 0x70, 0x07, 0x96, 0xab, 0xb4, 0x14, 0x0b, 0x28, 0xba, 0x00, 0xc3, 0x2d, 0x27, 0x88,
+ 0xdc, 0xc8, 0xf5, 0xbd, 0xe9, 0xe2, 0x59, 0xeb, 0x7c, 0xff, 0xfc, 0x94, 0x40, 0x1d, 0xae, 0x48,
+ 0x00, 0x8e, 0x71, 0x68, 0x37, 0x02, 0xe2, 0xd4, 0xaf, 0x7b, 0x8d, 0x9d, 0xe9, 0xbe, 0xb3, 0xd6,
+ 0xf9, 0xa1, 0xb8, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0x7f, 0xa5, 0x00, 0x43, 0x73, 0xeb, 0xeb,
+ 0xae, 0xe7, 0x46, 0x3b, 0xe8, 0x26, 0x8c, 0x7a, 0x7e, 0x9d, 0xc8, 0xff, 0xec, 0x2b, 0x46, 0x9e,
+ 0x3f, 0x3b, 0x9b, 0x5e, 0x4a, 0xb3, 0xab, 0x1a, 0xde, 0xfc, 0xe4, 0xde, 0x6e, 0x69, 0x54, 0x2f,
+ 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xa4, 0xe5, 0xd7, 0x15, 0xd9, 0x02, 0x23, 0x5b, 0xca, 0x22, 0x5b,
+ 0x89, 0xd1, 0xe6, 0x27, 0xf6, 0x76, 0x4b, 0x23, 0x5a, 0x01, 0xd6, 0x89, 0xa0, 0xdb, 0x30, 0x41,
+ 0xff, 0x7a, 0x91, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0xc7, 0xf2, 0xe8, 0x6a, 0xa8, 0xf3, 0x47, 0xf6,
+ 0x76, 0x4b, 0x13, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xff, 0xa4, 0x05, 0x13, 0x73, 0xad, 0xd6, 0x5c,
+ 0xd0, 0xf4, 0x83, 0x4a, 0xe0, 0xaf, 0xbb, 0x0d, 0x82, 0x5e, 0x86, 0xbe, 0x88, 0xce, 0x1a, 0x9f,
+ 0xe1, 0xc7, 0xc4, 0xd0, 0xf6, 0xd1, 0xb9, 0xda, 0xdf, 0x2d, 0x1d, 0x49, 0xa0, 0xb3, 0xa9, 0x64,
+ 0x15, 0xd0, 0x1b, 0x30, 0xd9, 0xf0, 0x6b, 0x4e, 0x63, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f,
+ 0x74, 0x6f, 0xb7, 0x34, 0x79, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x1e, 0x8c, 0xcf, 0x45, 0x91,
+ 0x53, 0xdb, 0x24, 0x75, 0xbe, 0xa0, 0xd0, 0x8b, 0xd0, 0xe7, 0x39, 0x4d, 0xd9, 0x99, 0xb3, 0xb2,
+ 0x33, 0xab, 0x4e, 0x93, 0x76, 0x66, 0xf2, 0x86, 0xe7, 0xbe, 0xdb, 0x16, 0x8b, 0x94, 0x96, 0x61,
+ 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x93, 0x6d, 0xb7, 0x46, 0x2a, 0x4e, 0xb4, 0x29, 0xfa, 0x80, 0x44,
+ 0x5d, 0x58, 0x54, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x78, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf,
+ 0x1e, 0xa2, 0x2d, 0x98, 0x68, 0x05, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xda, 0x3a, 0x5b, 0x3c, 0x3f,
+ 0xf2, 0xfc, 0xf9, 0xcc, 0xb1, 0x37, 0x51, 0x97, 0xbc, 0x28, 0xd8, 0x99, 0x3f, 0x21, 0xda, 0x9b,
+ 0x48, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xe7, 0x05, 0x38, 0x36, 0x77, 0xaf, 0x1d, 0x90, 0x45, 0x37,
+ 0xdc, 0x4a, 0x6e, 0xb8, 0xba, 0x1b, 0x6e, 0xad, 0xc6, 0x23, 0xa0, 0x56, 0xfa, 0xa2, 0x28, 0xc7,
+ 0x0a, 0x03, 0x3d, 0x07, 0x83, 0xf4, 0xf7, 0x0d, 0x5c, 0x16, 0x9f, 0x7c, 0x44, 0x20, 0x8f, 0x2c,
+ 0x3a, 0x91, 0xb3, 0xc8, 0x41, 0x58, 0xe2, 0xa0, 0x15, 0x18, 0xa9, 0xb1, 0xf3, 0x61, 0x63, 0xc5,
+ 0xaf, 0x13, 0xb6, 0xb6, 0x86, 0xe7, 0x9f, 0xa1, 0xe8, 0x0b, 0x71, 0xf1, 0xfe, 0x6e, 0x69, 0x9a,
+ 0xf7, 0x4d, 0x90, 0xd0, 0x60, 0x58, 0xaf, 0x8f, 0x6c, 0xb5, 0xdd, 0xfb, 0x18, 0x25, 0xc8, 0xd8,
+ 0xea, 0xe7, 0xb5, 0x9d, 0xdb, 0xcf, 0x76, 0xee, 0x68, 0xf6, 0xae, 0x45, 0x17, 0xa1, 0x6f, 0xcb,
+ 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd6, 0x69, 0x3a, 0xe7, 0x57, 0x5d, 0xaf, 0xbe, 0xbf, 0x5b, 0x9a,
+ 0x32, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0x1f, 0x0b, 0x4a, 0x0c, 0xb6, 0xec, 0x36, 0x48,
+ 0x85, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x1e, 0x20, 0x24, 0xb5, 0x80, 0x44,
+ 0xda, 0x90, 0xaa, 0x85, 0x51, 0x55, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02, 0xb6,
+ 0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x2a, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xb7, 0xf3,
+ 0x09, 0x7d, 0x0c, 0x26, 0xe2, 0xc6, 0xc2, 0x96, 0x53, 0x93, 0x03, 0xc8, 0x76, 0x70, 0xd5, 0x04,
+ 0xe1, 0x24, 0xae, 0xfd, 0x9f, 0x5b, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, 0x7f, 0xab, 0xfd, 0x07,
+ 0x16, 0x0c, 0xce, 0xbb, 0x5e, 0xdd, 0xf5, 0x36, 0xd0, 0x67, 0x60, 0x88, 0x5e, 0x95, 0x75, 0x27,
+ 0x72, 0xc4, 0x31, 0xfc, 0x61, 0x6d, 0x6f, 0xa9, 0x9b, 0x6b, 0xb6, 0xb5, 0xb5, 0x41, 0x0b, 0xc2,
+ 0x59, 0x8a, 0x4d, 0x77, 0xdb, 0xf5, 0xdb, 0x9f, 0x25, 0xb5, 0x68, 0x85, 0x44, 0x4e, 0xfc, 0x39,
+ 0x71, 0x19, 0x56, 0x54, 0xd1, 0x55, 0x18, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xe2, 0x3c, 0xce, 0x3c,
+ 0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x6a, 0x24, 0xbe, 0xa5, 0xd6, 0x58, 0x55, 0x2c, 0x48,
+ 0xd8, 0xff, 0x6e, 0x10, 0x4e, 0x2e, 0x54, 0xcb, 0x39, 0xeb, 0xea, 0x1c, 0x0c, 0xd4, 0x03, 0x77,
+ 0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x45, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x82, 0x51, 0x7e, 0x3f,
+ 0x5e, 0x71, 0xbc, 0x7a, 0x7c, 0x3c, 0x0a, 0xec, 0xd1, 0x9b, 0x1a, 0x0c, 0x1b, 0x98, 0x07, 0x5c,
+ 0x54, 0xe7, 0x12, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xa2, 0x05, 0x93, 0xbc, 0x99, 0xb9, 0x28, 0x0a,
+ 0xdc, 0xdb, 0xed, 0x88, 0x84, 0xd3, 0xfd, 0xec, 0xa4, 0x5b, 0xc8, 0x1a, 0xad, 0xdc, 0x11, 0x98,
+ 0xbd, 0x99, 0xa0, 0xc2, 0x0f, 0xc1, 0x69, 0xd1, 0xee, 0x64, 0x12, 0x8c, 0x53, 0xcd, 0xa2, 0x1f,
+ 0xb1, 0x60, 0xa6, 0xe6, 0x7b, 0x51, 0xe0, 0x37, 0x1a, 0x24, 0xa8, 0xb4, 0x6f, 0x37, 0xdc, 0x70,
+ 0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0x99, 0xbd,
+ 0xdd, 0xd2, 0xcc, 0x42, 0x2e, 0x29, 0xdc, 0xa1, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0xd5, 0xc8,
+ 0xd9, 0x20, 0x71, 0xe3, 0x83, 0xbd, 0x37, 0x7e, 0x7c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24, 0x70,
+ 0x06, 0x59, 0xf4, 0x2e, 0x1c, 0xa5, 0xa5, 0xa9, 0x6f, 0x1d, 0xea, 0xbd, 0xb9, 0xe9, 0xbd, 0xdd,
+ 0xd2, 0xd1, 0xd5, 0x0c, 0x22, 0x38, 0x93, 0x34, 0xfa, 0x21, 0x0b, 0x4e, 0xc6, 0x9f, 0xbf, 0x74,
+ 0xb7, 0xe5, 0x78, 0xf5, 0xb8, 0xe1, 0xe1, 0xde, 0x1b, 0xa6, 0x67, 0xf2, 0xc9, 0x85, 0x3c, 0x4a,
+ 0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x11, 0xda, 0xb5, 0x64, 0xdb, 0xd0, 0x7b, 0xdb, 0x27, 0xf6, 0x76,
+ 0x4b, 0x47, 0x56, 0xd3, 0x34, 0x70, 0x16, 0xe1, 0x99, 0x05, 0x38, 0x96, 0xb9, 0x3a, 0xd1, 0x24,
+ 0x14, 0xb7, 0x08, 0x67, 0x02, 0x87, 0x31, 0xfd, 0x89, 0x8e, 0x42, 0xff, 0xb6, 0xd3, 0x68, 0x8b,
+ 0x8d, 0x89, 0xf9, 0x9f, 0x57, 0x0a, 0x97, 0x2c, 0xfb, 0x7f, 0x28, 0xc2, 0xc4, 0x42, 0xb5, 0x7c,
+ 0x5f, 0xbb, 0x5e, 0xbf, 0xf6, 0x0a, 0x1d, 0xaf, 0xbd, 0xf8, 0x12, 0x2d, 0xe6, 0x5e, 0xa2, 0x3f,
+ 0x98, 0xb1, 0x65, 0xfb, 0xd8, 0x96, 0xfd, 0x68, 0xce, 0x96, 0x7d, 0xc0, 0x1b, 0x75, 0x3b, 0x67,
+ 0xd5, 0xf6, 0xb3, 0x09, 0xcc, 0xe4, 0x90, 0x18, 0xef, 0x97, 0x3c, 0x6a, 0x0f, 0xb8, 0x74, 0x1f,
+ 0xcc, 0x3c, 0xd6, 0x60, 0x74, 0xc1, 0x69, 0x39, 0xb7, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f,
+ 0x42, 0xd1, 0xa9, 0xd7, 0x19, 0x77, 0x37, 0x3c, 0x7f, 0x6c, 0x6f, 0xb7, 0x54, 0x9c, 0xab, 0x53,
+ 0x36, 0x03, 0x14, 0xd6, 0x0e, 0xa6, 0x18, 0xe8, 0x69, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0,
+ 0x30, 0xe9, 0x2e, 0xef, 0x5b, 0x0c, 0xfc, 0x56, 0x02, 0x95, 0xe1, 0xd8, 0x7f, 0x56, 0x80, 0x53,
+ 0x0b, 0xa4, 0xb5, 0xb9, 0x5c, 0xcd, 0xb9, 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f,
+ 0x42, 0xd1, 0x34, 0x5b, 0x11, 0x2b, 0xa2, 0x0c, 0x2b, 0x28, 0x3a, 0x0b, 0x7d, 0xad, 0x98, 0x89,
+ 0x1d, 0x95, 0x0c, 0x30, 0x63, 0x5f, 0x19, 0x84, 0x62, 0xb4, 0x43, 0x12, 0x88, 0x15, 0xa3, 0x30,
+ 0x6e, 0x84, 0x24, 0xc0, 0x0c, 0x12, 0x73, 0x02, 0x94, 0x47, 0x10, 0x37, 0x42, 0x82, 0x13, 0xa0,
+ 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x0c, 0x87, 0x89, 0x99, 0xed, 0x69, 0x6b, 0x8e, 0x31, 0x56, 0x41,
+ 0xcd, 0x64, 0x4c, 0xc4, 0xb8, 0xc1, 0x06, 0xba, 0xb2, 0x0a, 0x5f, 0x2f, 0x00, 0xe2, 0x43, 0xf8,
+ 0x5d, 0x36, 0x70, 0x37, 0xd2, 0x03, 0xd7, 0xfb, 0x96, 0x78, 0x50, 0xa3, 0xf7, 0xff, 0x5a, 0x70,
+ 0x6a, 0xc1, 0xf5, 0xea, 0x24, 0xc8, 0x59, 0x80, 0x0f, 0xe7, 0x29, 0x7f, 0x30, 0x26, 0xc5, 0x58,
+ 0x62, 0x7d, 0x0f, 0x60, 0x89, 0xd9, 0xff, 0x6c, 0x01, 0xe2, 0x9f, 0xfd, 0xbe, 0xfb, 0xd8, 0x1b,
+ 0xe9, 0x8f, 0x7d, 0x00, 0xcb, 0xc2, 0xbe, 0x06, 0xe3, 0x0b, 0x0d, 0x97, 0x78, 0x51, 0xb9, 0xb2,
+ 0xe0, 0x7b, 0xeb, 0xee, 0x06, 0x7a, 0x05, 0xc6, 0x23, 0xb7, 0x49, 0xfc, 0x76, 0x54, 0x25, 0x35,
+ 0xdf, 0x63, 0x2f, 0x57, 0xeb, 0x7c, 0xff, 0x3c, 0xda, 0xdb, 0x2d, 0x8d, 0xaf, 0x19, 0x10, 0x9c,
+ 0xc0, 0xb4, 0x7f, 0x95, 0x9e, 0x5b, 0x8d, 0x76, 0x18, 0x91, 0x60, 0x2d, 0x68, 0x87, 0xd1, 0x7c,
+ 0x9b, 0xf2, 0x9e, 0x95, 0xc0, 0xa7, 0xdd, 0x71, 0x7d, 0x0f, 0x9d, 0x32, 0x9e, 0xe3, 0x43, 0xf2,
+ 0x29, 0x2e, 0x9e, 0xdd, 0xb3, 0x00, 0xa1, 0xbb, 0xe1, 0x91, 0x40, 0x7b, 0x3e, 0x8c, 0xb3, 0xad,
+ 0xa2, 0x4a, 0xb1, 0x86, 0x81, 0x1a, 0x30, 0xd6, 0x70, 0x6e, 0x93, 0x46, 0x95, 0x34, 0x48, 0x2d,
+ 0xf2, 0x03, 0x21, 0xdf, 0x78, 0xa1, 0xb7, 0x77, 0xc0, 0x35, 0xbd, 0xea, 0xfc, 0xd4, 0xde, 0x6e,
+ 0x69, 0xcc, 0x28, 0xc2, 0x26, 0x71, 0x7a, 0x74, 0xf8, 0x2d, 0xfa, 0x15, 0x4e, 0x43, 0x7f, 0x7c,
+ 0x5e, 0x17, 0x65, 0x58, 0x41, 0xd5, 0xd1, 0xd1, 0x97, 0x77, 0x74, 0xd8, 0x7f, 0x47, 0x17, 0x9a,
+ 0xdf, 0x6c, 0xf9, 0x1e, 0xf1, 0xa2, 0x05, 0xdf, 0xab, 0x73, 0xc9, 0xd4, 0x2b, 0x86, 0xe8, 0xe4,
+ 0x5c, 0x42, 0x74, 0x72, 0x3c, 0x5d, 0x43, 0x93, 0x9e, 0x7c, 0x14, 0x06, 0xc2, 0xc8, 0x89, 0xda,
+ 0xa1, 0x18, 0xb8, 0x47, 0xe5, 0xb2, 0xab, 0xb2, 0xd2, 0xfd, 0xdd, 0xd2, 0x84, 0xaa, 0xc6, 0x8b,
+ 0xb0, 0xa8, 0x80, 0x9e, 0x82, 0xc1, 0x26, 0x09, 0x43, 0x67, 0x43, 0xb2, 0x0d, 0x13, 0xa2, 0xee,
+ 0xe0, 0x0a, 0x2f, 0xc6, 0x12, 0x8e, 0x1e, 0x83, 0x7e, 0x12, 0x04, 0x7e, 0x20, 0xbe, 0x6d, 0x4c,
+ 0x20, 0xf6, 0x2f, 0xd1, 0x42, 0xcc, 0x61, 0xf6, 0xff, 0x6c, 0xc1, 0x84, 0xea, 0x2b, 0x6f, 0xeb,
+ 0x10, 0x9e, 0x6b, 0x6f, 0x03, 0xd4, 0xe4, 0x07, 0x86, 0xec, 0x9a, 0x1d, 0x79, 0xfe, 0x5c, 0x26,
+ 0x47, 0x93, 0x1a, 0xc6, 0x98, 0xb2, 0x2a, 0x0a, 0xb1, 0x46, 0xcd, 0xfe, 0x63, 0x0b, 0x8e, 0x24,
+ 0xbe, 0xe8, 0x9a, 0x1b, 0x46, 0xe8, 0x9d, 0xd4, 0x57, 0xcd, 0xf6, 0xb8, 0xf8, 0xdc, 0x90, 0x7f,
+ 0x93, 0xda, 0xf3, 0xb2, 0x44, 0xfb, 0xa2, 0x2b, 0xd0, 0xef, 0x46, 0xa4, 0x29, 0x3f, 0xe6, 0xb1,
+ 0x8e, 0x1f, 0xc3, 0x7b, 0x15, 0xcf, 0x48, 0x99, 0xd6, 0xc4, 0x9c, 0x80, 0xfd, 0x67, 0x45, 0x18,
+ 0xe6, 0xfb, 0x7b, 0xc5, 0x69, 0x1d, 0xc2, 0x5c, 0x3c, 0x03, 0xc3, 0x6e, 0xb3, 0xd9, 0x8e, 0x9c,
+ 0xdb, 0xe2, 0xde, 0x1b, 0xe2, 0x67, 0x50, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0xca, 0xd0, 0xc7, 0xba,
+ 0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14, 0x7d, 0x9f, 0x5d, 0x74, 0x22, 0x87, 0xb3, 0x9c, 0x6a,
+ 0x5f, 0xd1, 0x22, 0xcc, 0x48, 0x20, 0x07, 0xe0, 0xb6, 0xeb, 0x39, 0xc1, 0x0e, 0x2d, 0x9b, 0x2e,
+ 0x32, 0x82, 0xcf, 0x75, 0x26, 0x38, 0xaf, 0xf0, 0x39, 0x59, 0xf5, 0x61, 0x31, 0x00, 0x6b, 0x44,
+ 0x67, 0x5e, 0x86, 0x61, 0x85, 0x7c, 0x10, 0xce, 0x71, 0xe6, 0x63, 0x30, 0x91, 0x68, 0xab, 0x5b,
+ 0xf5, 0x51, 0x9d, 0xf1, 0xfc, 0x43, 0x76, 0x64, 0x88, 0x5e, 0x2f, 0x79, 0xdb, 0xe2, 0x6e, 0xba,
+ 0x07, 0x47, 0x1b, 0x19, 0x47, 0xbe, 0x98, 0xd7, 0xde, 0xaf, 0x88, 0x53, 0xe2, 0xb3, 0x8f, 0x66,
+ 0x41, 0x71, 0x66, 0x1b, 0xc6, 0x89, 0x58, 0xe8, 0x74, 0x22, 0xd2, 0xf3, 0xee, 0xa8, 0xea, 0xfc,
+ 0x55, 0xb2, 0xa3, 0x0e, 0xd5, 0xef, 0x64, 0xf7, 0x4f, 0xf3, 0xd1, 0xe7, 0xc7, 0xe5, 0x88, 0x20,
+ 0x50, 0xbc, 0x4a, 0x76, 0xf8, 0x54, 0xe8, 0x5f, 0x57, 0xec, 0xf8, 0x75, 0x5f, 0xb3, 0x60, 0x4c,
+ 0x7d, 0xdd, 0x21, 0x9c, 0x0b, 0xf3, 0xe6, 0xb9, 0x70, 0xba, 0xe3, 0x02, 0xcf, 0x39, 0x11, 0xbe,
+ 0x5e, 0x80, 0x93, 0x0a, 0x87, 0x3e, 0xa2, 0xf8, 0x1f, 0xb1, 0xaa, 0x2e, 0xc0, 0xb0, 0xa7, 0xc4,
+ 0x89, 0x96, 0x29, 0xc7, 0x8b, 0x85, 0x89, 0x31, 0x0e, 0xbd, 0xf2, 0xbc, 0xf8, 0xd2, 0x1e, 0xd5,
+ 0xe5, 0xec, 0xe2, 0x72, 0x9f, 0x87, 0x62, 0xdb, 0xad, 0x8b, 0x0b, 0xe6, 0xc3, 0x72, 0xb4, 0x6f,
+ 0x94, 0x17, 0xf7, 0x77, 0x4b, 0x8f, 0xe6, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0xde, 0x28, 0x2f,
+ 0x62, 0x5a, 0x19, 0xcd, 0xc1, 0x84, 0xd4, 0xaa, 0xdd, 0xa4, 0x7c, 0xa9, 0xef, 0x89, 0x7b, 0x48,
+ 0x09, 0xcb, 0xb1, 0x09, 0xc6, 0x49, 0x7c, 0xb4, 0x08, 0x93, 0x5b, 0xed, 0xdb, 0xa4, 0x41, 0x22,
+ 0xfe, 0xc1, 0x57, 0x09, 0x17, 0x25, 0x0f, 0xc7, 0x4f, 0xd8, 0xab, 0x09, 0x38, 0x4e, 0xd5, 0xb0,
+ 0xbf, 0xcd, 0xee, 0x03, 0x31, 0x7a, 0x1a, 0x7f, 0xf3, 0x9d, 0x5c, 0xce, 0xbd, 0xac, 0x8a, 0xab,
+ 0x64, 0x67, 0xcd, 0xa7, 0x7c, 0x48, 0xf6, 0xaa, 0x30, 0xd6, 0x7c, 0x5f, 0xc7, 0x35, 0xff, 0xbb,
+ 0x05, 0x38, 0xa6, 0x46, 0xc0, 0xe0, 0x96, 0xbf, 0xdb, 0xc7, 0xe0, 0x22, 0x8c, 0xd4, 0xc9, 0xba,
+ 0xd3, 0x6e, 0x44, 0x4a, 0xaf, 0xd1, 0xcf, 0x55, 0x6d, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0x0e, 0x30,
+ 0x6c, 0xbf, 0x39, 0xc6, 0x2e, 0xe2, 0xc8, 0xa1, 0x6b, 0x5c, 0xed, 0x1a, 0x2b, 0x77, 0xd7, 0x3c,
+ 0x06, 0xfd, 0x6e, 0x93, 0x32, 0x66, 0x05, 0x93, 0xdf, 0x2a, 0xd3, 0x42, 0xcc, 0x61, 0xe8, 0x09,
+ 0x18, 0xac, 0xf9, 0xcd, 0xa6, 0xe3, 0xd5, 0xd9, 0x95, 0x37, 0x3c, 0x3f, 0x42, 0x79, 0xb7, 0x05,
+ 0x5e, 0x84, 0x25, 0x8c, 0x32, 0xdf, 0x4e, 0xb0, 0xc1, 0x85, 0x3d, 0x82, 0xf9, 0x9e, 0x0b, 0x36,
+ 0x42, 0xcc, 0x4a, 0xe9, 0x5b, 0xf5, 0x8e, 0x1f, 0x6c, 0xb9, 0xde, 0xc6, 0xa2, 0x1b, 0x88, 0x2d,
+ 0xa1, 0xee, 0xc2, 0x5b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x0c, 0xfd, 0x2d, 0x3f, 0x88, 0xc2, 0xe9,
+ 0x01, 0x36, 0xdc, 0x8f, 0xe6, 0x1c, 0x44, 0xfc, 0x6b, 0x2b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff,
+ 0x85, 0x98, 0x57, 0x47, 0xd7, 0x60, 0x90, 0x78, 0xdb, 0xcb, 0x81, 0xdf, 0x9c, 0x3e, 0x92, 0x4f,
+ 0x69, 0x89, 0xa3, 0xf0, 0x65, 0x16, 0xf3, 0xa8, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0xa3, 0x50, 0x24,
+ 0xde, 0xf6, 0xf4, 0x20, 0xa3, 0x34, 0x93, 0x43, 0xe9, 0xa6, 0x13, 0xc4, 0x67, 0xfe, 0x92, 0xb7,
+ 0x8d, 0x69, 0x1d, 0xf4, 0x09, 0x18, 0x96, 0x07, 0x46, 0x28, 0xa4, 0xa8, 0x99, 0x0b, 0x56, 0x1e,
+ 0x33, 0x98, 0xbc, 0xdb, 0x76, 0x03, 0xd2, 0x24, 0x5e, 0x14, 0xc6, 0x27, 0xa4, 0x84, 0x86, 0x38,
+ 0xa6, 0x86, 0x6a, 0x30, 0x1a, 0x90, 0xd0, 0xbd, 0x47, 0x2a, 0x7e, 0xc3, 0xad, 0xed, 0x4c, 0x9f,
+ 0x60, 0xdd, 0x7b, 0xaa, 0xe3, 0x90, 0x61, 0xad, 0x42, 0x2c, 0xe5, 0xd7, 0x4b, 0xb1, 0x41, 0x14,
+ 0xbd, 0x05, 0x63, 0x01, 0x09, 0x23, 0x27, 0x88, 0x44, 0x2b, 0xd3, 0x4a, 0x2b, 0x37, 0x86, 0x75,
+ 0x00, 0x7f, 0x4e, 0xc4, 0xcd, 0xc4, 0x10, 0x6c, 0x52, 0x40, 0x9f, 0x90, 0x2a, 0x87, 0x15, 0xbf,
+ 0xed, 0x45, 0xe1, 0xf4, 0x30, 0xeb, 0x77, 0xa6, 0x6e, 0xfa, 0x66, 0x8c, 0x97, 0xd4, 0x49, 0xf0,
+ 0xca, 0xd8, 0x20, 0x85, 0x3e, 0x05, 0x63, 0xfc, 0x3f, 0x57, 0xa9, 0x86, 0xd3, 0xc7, 0x18, 0xed,
+ 0xb3, 0xf9, 0xb4, 0x39, 0xe2, 0xfc, 0x31, 0x41, 0x7c, 0x4c, 0x2f, 0x0d, 0xb1, 0x49, 0x0d, 0x61,
+ 0x18, 0x6b, 0xb8, 0xdb, 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0xdb, 0x44, 0x48, 0x88, 0x4f, 0x66,
+ 0xab, 0x60, 0xfd, 0xdb, 0x44, 0x3c, 0x02, 0xf5, 0x3a, 0xd8, 0x24, 0x81, 0x6e, 0xc0, 0x38, 0x7d,
+ 0x92, 0xbb, 0x31, 0xd1, 0x91, 0x6e, 0x44, 0xd9, 0xc3, 0x19, 0x1b, 0x95, 0x70, 0x82, 0x08, 0xba,
+ 0x0e, 0xa3, 0x6c, 0xcc, 0xdb, 0x2d, 0x4e, 0xf4, 0x78, 0x37, 0xa2, 0xcc, 0xa0, 0xa0, 0xaa, 0x55,
+ 0xc1, 0x06, 0x01, 0xf4, 0x26, 0x0c, 0x37, 0xdc, 0x75, 0x52, 0xdb, 0xa9, 0x35, 0xc8, 0xf4, 0x28,
+ 0xa3, 0x96, 0x79, 0x18, 0x5e, 0x93, 0x48, 0x9c, 0x3f, 0x57, 0x7f, 0x71, 0x5c, 0x1d, 0xdd, 0x84,
+ 0xe3, 0x11, 0x09, 0x9a, 0xae, 0xe7, 0xd0, 0x43, 0x4c, 0x3c, 0x09, 0x99, 0x66, 0x7c, 0x8c, 0xad,
+ 0xae, 0x33, 0x62, 0x36, 0x8e, 0xaf, 0x65, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x2e, 0x4c, 0x67, 0x40,
+ 0xf8, 0xba, 0x3d, 0xca, 0x28, 0xbf, 0x26, 0x28, 0x4f, 0xaf, 0xe5, 0xe0, 0xed, 0x77, 0x80, 0xe1,
+ 0x5c, 0xea, 0xe8, 0x3a, 0x4c, 0xb0, 0x93, 0xb3, 0xd2, 0x6e, 0x34, 0x44, 0x83, 0xe3, 0xac, 0xc1,
+ 0x27, 0x24, 0x1f, 0x51, 0x36, 0xc1, 0xfb, 0xbb, 0x25, 0x88, 0xff, 0xe1, 0x64, 0x6d, 0x74, 0x9b,
+ 0x29, 0x61, 0xdb, 0x81, 0x1b, 0xed, 0xd0, 0x5d, 0x45, 0xee, 0x46, 0xd3, 0x13, 0x1d, 0x05, 0x52,
+ 0x3a, 0xaa, 0xd2, 0xd4, 0xea, 0x85, 0x38, 0x49, 0x90, 0x5e, 0x05, 0x61, 0x54, 0x77, 0xbd, 0xe9,
+ 0x49, 0xfe, 0x9e, 0x92, 0x27, 0x69, 0x95, 0x16, 0x62, 0x0e, 0x63, 0x0a, 0x58, 0xfa, 0xe3, 0x3a,
+ 0xbd, 0x71, 0xa7, 0x18, 0x62, 0xac, 0x80, 0x95, 0x00, 0x1c, 0xe3, 0x50, 0x26, 0x38, 0x8a, 0x76,
+ 0xa6, 0x11, 0x43, 0x55, 0x07, 0xe2, 0xda, 0xda, 0x27, 0x30, 0x2d, 0xb7, 0x6f, 0xc3, 0xb8, 0x3a,
+ 0x26, 0xd8, 0x98, 0xa0, 0x12, 0xf4, 0x33, 0xb6, 0x4f, 0x88, 0x4f, 0x87, 0x69, 0x17, 0x18, 0x4b,
+ 0x88, 0x79, 0x39, 0xeb, 0x82, 0x7b, 0x8f, 0xcc, 0xef, 0x44, 0x84, 0xcb, 0x22, 0x8a, 0x5a, 0x17,
+ 0x24, 0x00, 0xc7, 0x38, 0xf6, 0xbf, 0xe7, 0xec, 0x73, 0x7c, 0x4b, 0xf4, 0x70, 0x2f, 0x3e, 0x0b,
+ 0x43, 0xcc, 0xf0, 0xc3, 0x0f, 0xb8, 0x76, 0xb6, 0x3f, 0x66, 0x98, 0xaf, 0x88, 0x72, 0xac, 0x30,
+ 0xd0, 0xab, 0x30, 0x56, 0xd3, 0x1b, 0x10, 0x97, 0xba, 0x3a, 0x46, 0x8c, 0xd6, 0xb1, 0x89, 0x8b,
+ 0x2e, 0xc1, 0x10, 0xb3, 0x71, 0xaa, 0xf9, 0x0d, 0xc1, 0x6d, 0x4a, 0xce, 0x64, 0xa8, 0x22, 0xca,
+ 0xf7, 0xb5, 0xdf, 0x58, 0x61, 0xa3, 0x73, 0x30, 0x40, 0xbb, 0x50, 0xae, 0x88, 0xeb, 0x54, 0x49,
+ 0x02, 0xaf, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0x1f, 0x5b, 0x8c, 0x97, 0x4a, 0x9f, 0xf9, 0xe8, 0x0a,
+ 0xbb, 0x34, 0xd8, 0x0d, 0xa2, 0x69, 0xe1, 0x1f, 0xd7, 0x6e, 0x02, 0x05, 0xdb, 0x4f, 0xfc, 0xc7,
+ 0x46, 0x4d, 0xf4, 0x76, 0xf2, 0x66, 0xe0, 0x0c, 0xc5, 0x8b, 0x72, 0x08, 0x92, 0xb7, 0xc3, 0x23,
+ 0xf1, 0x15, 0x47, 0xfb, 0xd3, 0xe9, 0x8a, 0xb0, 0x7f, 0xaa, 0xa0, 0xad, 0x92, 0x6a, 0xe4, 0x44,
+ 0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, 0x21, 0xf8, 0xbe, 0xce, 0x17, 0x1d, 0xab,
+ 0x74, 0x8b, 0x57, 0xe0, 0xdc, 0x8b, 0xf8, 0x83, 0x25, 0x19, 0x4a, 0x31, 0x68, 0x7b, 0x1e, 0xa5,
+ 0x58, 0xe8, 0x95, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, 0x4b, 0x32, 0xe8, 0x1d, 0x00, 0x79,
+ 0x42, 0x90, 0xba, 0x90, 0x1d, 0x3e, 0xdb, 0x9d, 0xe8, 0x9a, 0xaa, 0xc3, 0x85, 0x93, 0xf1, 0x7f,
+ 0xac, 0xd1, 0xb3, 0x23, 0x6d, 0x4e, 0xf5, 0xce, 0xa0, 0x4f, 0xd2, 0x2d, 0xea, 0x04, 0x11, 0xa9,
+ 0xcf, 0x45, 0x62, 0x70, 0x9e, 0xee, 0xed, 0x71, 0xb8, 0xe6, 0x36, 0x89, 0xbe, 0x9d, 0x05, 0x11,
+ 0x1c, 0xd3, 0xb3, 0x7f, 0xbf, 0x08, 0xd3, 0x79, 0xdd, 0xa5, 0x9b, 0x86, 0xdc, 0x75, 0xa3, 0x05,
+ 0xca, 0xd6, 0x5a, 0xe6, 0xa6, 0x59, 0x12, 0xe5, 0x58, 0x61, 0xd0, 0xd5, 0x1b, 0xba, 0x1b, 0xf2,
+ 0x6d, 0xdf, 0x1f, 0xaf, 0xde, 0x2a, 0x2b, 0xc5, 0x02, 0x4a, 0xf1, 0x02, 0xe2, 0x84, 0xc2, 0xf8,
+ 0x4e, 0x5b, 0xe5, 0x98, 0x95, 0x62, 0x01, 0xd5, 0xa5, 0x8c, 0x7d, 0x5d, 0xa4, 0x8c, 0xc6, 0x10,
+ 0xf5, 0x3f, 0xd8, 0x21, 0x42, 0x9f, 0x06, 0x58, 0x77, 0x3d, 0x37, 0xdc, 0x64, 0xd4, 0x07, 0x0e,
+ 0x4c, 0x5d, 0x31, 0xc5, 0xcb, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x60, 0x44, 0x1d, 0x20, 0xe5,
+ 0x45, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x11, 0xeb, 0x78, 0xf6, 0x67, 0x93, 0xeb,
+ 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x5e, 0xc7, 0xb7, 0xd0, 0x79, 0x7c, 0xed, 0xbf, 0x1e, 0x86,
+ 0x09, 0xa3, 0xb1, 0x76, 0xd8, 0xc3, 0x99, 0x7b, 0x99, 0x5e, 0x40, 0x4e, 0x44, 0xc4, 0xfe, 0xb3,
+ 0xbb, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xe1, 0x86, 0x13, 0x32,
+ 0x89, 0x25, 0x11, 0xfb, 0xae, 0x17, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed, 0xd6, 0xe7, 0xb4,
+ 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c, 0xd8, 0x0e, 0xe6,
+ 0x30, 0x74, 0x89, 0x1d, 0xad, 0x74, 0x55, 0x2c, 0x50, 0x6e, 0x94, 0x2d, 0xb3, 0x7e, 0x83, 0xc9,
+ 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0x81, 0x0e, 0x6f, 0xb2, 0xa7, 0x60, 0x90, 0xfd, 0x50,
+ 0x2b, 0x40, 0xcd, 0x46, 0x99, 0x17, 0x63, 0x09, 0x4f, 0x2e, 0x98, 0xa1, 0xde, 0x16, 0x0c, 0x7d,
+ 0xf5, 0x89, 0x45, 0xcd, 0xcc, 0x2e, 0x86, 0xf8, 0x29, 0x27, 0x96, 0x3c, 0x96, 0x30, 0xf4, 0x6b,
+ 0x16, 0x20, 0xa7, 0x41, 0x5f, 0xcb, 0xb4, 0x58, 0x3d, 0x6e, 0x80, 0xb1, 0xda, 0xaf, 0x76, 0x1d,
+ 0xf6, 0x76, 0x38, 0x3b, 0x97, 0xaa, 0xcd, 0x25, 0xa5, 0xaf, 0x88, 0x2e, 0xa2, 0x34, 0x82, 0x7e,
+ 0x19, 0x5d, 0x73, 0xc3, 0xe8, 0xf3, 0x7f, 0x9f, 0xb8, 0x9c, 0x32, 0xba, 0x84, 0x6e, 0xe8, 0x8f,
+ 0xaf, 0x91, 0x03, 0x3e, 0xbe, 0xc6, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0, 0x8c, 0xb2, 0x2f,
+ 0x7f, 0xa2, 0xcb, 0x03, 0x46, 0x88, 0xd3, 0x7b, 0x79, 0xc6, 0x54, 0x84, 0x1e, 0x78, 0x8c, 0x75,
+ 0xb9, 0xf3, 0x23, 0xf8, 0x46, 0x48, 0x82, 0xf9, 0x93, 0x52, 0x4d, 0xbc, 0xaf, 0xf3, 0x1e, 0x9a,
+ 0xde, 0xf8, 0x87, 0x2c, 0x98, 0x4e, 0x0f, 0x10, 0xef, 0xd2, 0xf4, 0x38, 0xeb, 0xbf, 0xdd, 0x69,
+ 0x64, 0x44, 0xe7, 0xa5, 0xb9, 0xeb, 0xf4, 0x5c, 0x0e, 0x2d, 0x9c, 0xdb, 0x0a, 0xba, 0x04, 0x10,
+ 0x46, 0x7e, 0x8b, 0x9f, 0xf5, 0x8c, 0x99, 0x1d, 0x66, 0x06, 0x17, 0x50, 0x55, 0xa5, 0xfb, 0xf1,
+ 0x5d, 0xa0, 0xe1, 0xce, 0xb4, 0xe1, 0x44, 0xce, 0x8a, 0xc9, 0x90, 0x77, 0x2f, 0xea, 0xf2, 0xee,
+ 0x2e, 0x52, 0xd2, 0x59, 0x39, 0xa7, 0xb3, 0x6f, 0xb5, 0x1d, 0x2f, 0x72, 0xa3, 0x1d, 0x5d, 0x3e,
+ 0xee, 0x81, 0x39, 0x94, 0xe8, 0x53, 0xd0, 0xdf, 0x70, 0xbd, 0xf6, 0x5d, 0x71, 0xc7, 0x9e, 0xcb,
+ 0x7e, 0xfe, 0x78, 0xed, 0xbb, 0xe6, 0xe4, 0x94, 0xe8, 0x56, 0x66, 0xe5, 0xfb, 0xbb, 0x25, 0x94,
+ 0x46, 0xc0, 0x9c, 0xaa, 0xfd, 0x34, 0x8c, 0x2f, 0x3a, 0xa4, 0xe9, 0x7b, 0x4b, 0x5e, 0xbd, 0xe5,
+ 0xbb, 0x5e, 0x84, 0xa6, 0xa1, 0x8f, 0x31, 0x97, 0xfc, 0x6a, 0xed, 0xa3, 0x83, 0x8f, 0x59, 0x89,
+ 0xbd, 0x01, 0xc7, 0x16, 0xfd, 0x3b, 0xde, 0x1d, 0x27, 0xa8, 0xcf, 0x55, 0xca, 0x9a, 0xbc, 0x70,
+ 0x55, 0xca, 0xab, 0xac, 0x7c, 0x69, 0x80, 0x56, 0x93, 0x2f, 0xc2, 0x65, 0xb7, 0x41, 0x72, 0xa4,
+ 0xba, 0x3f, 0x5b, 0x30, 0x5a, 0x8a, 0xf1, 0x95, 0x4e, 0xd2, 0xca, 0x35, 0x67, 0x78, 0x0b, 0x86,
+ 0xd6, 0x5d, 0xd2, 0xa8, 0x63, 0xb2, 0x2e, 0x66, 0xe3, 0xc9, 0x7c, 0x83, 0xc7, 0x65, 0x8a, 0xa9,
+ 0x94, 0xa7, 0x4c, 0xda, 0xb5, 0x2c, 0x2a, 0x63, 0x45, 0x06, 0x6d, 0xc1, 0xa4, 0x9c, 0x33, 0x09,
+ 0x15, 0xe7, 0xfd, 0x53, 0x9d, 0x96, 0xaf, 0x49, 0x9c, 0x19, 0x7f, 0xe3, 0x04, 0x19, 0x9c, 0x22,
+ 0x8c, 0x4e, 0x41, 0x5f, 0x93, 0x72, 0x36, 0x7d, 0x6c, 0xf8, 0x99, 0x78, 0x8b, 0x49, 0xea, 0x58,
+ 0xa9, 0xfd, 0xf3, 0x16, 0x9c, 0x48, 0x8d, 0x8c, 0x90, 0x58, 0x3e, 0xe0, 0x59, 0x48, 0x4a, 0x10,
+ 0x0b, 0xdd, 0x25, 0x88, 0xf6, 0x7f, 0x61, 0xc1, 0xd1, 0xa5, 0x66, 0x2b, 0xda, 0x59, 0x74, 0x4d,
+ 0xdb, 0x83, 0x97, 0x61, 0xa0, 0x49, 0xea, 0x6e, 0xbb, 0x29, 0x66, 0xae, 0x24, 0x6f, 0xff, 0x15,
+ 0x56, 0x4a, 0x4f, 0x90, 0x6a, 0xe4, 0x07, 0xce, 0x06, 0xe1, 0x05, 0x58, 0xa0, 0x33, 0x1e, 0xca,
+ 0xbd, 0x47, 0xae, 0xb9, 0x4d, 0x37, 0xba, 0xbf, 0xdd, 0x25, 0xcc, 0x06, 0x24, 0x11, 0x1c, 0xd3,
+ 0xb3, 0xbf, 0x69, 0xc1, 0x84, 0x5c, 0xf7, 0x73, 0xf5, 0x7a, 0x40, 0xc2, 0x10, 0xcd, 0x40, 0xc1,
+ 0x6d, 0x89, 0x5e, 0x82, 0xe8, 0x65, 0xa1, 0x5c, 0xc1, 0x05, 0xb7, 0x25, 0x9f, 0x6b, 0x8c, 0xc1,
+ 0x28, 0x9a, 0x16, 0x14, 0x57, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x3c, 0x0c, 0x79, 0x7e, 0x9d, 0xbf,
+ 0x78, 0x84, 0x0e, 0x9d, 0x62, 0xae, 0x8a, 0x32, 0xac, 0xa0, 0xa8, 0x02, 0xc3, 0xdc, 0xbe, 0x36,
+ 0x5e, 0xb4, 0x3d, 0x59, 0xe9, 0xb2, 0x2f, 0x5b, 0x93, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0x53, 0x0b,
+ 0x46, 0xe5, 0x97, 0xf5, 0xf8, 0x16, 0xa5, 0x5b, 0x2b, 0x7e, 0x87, 0xc6, 0x5b, 0x8b, 0xbe, 0x25,
+ 0x19, 0xc4, 0x78, 0x42, 0x16, 0x0f, 0xf4, 0x84, 0xbc, 0x08, 0x23, 0x4e, 0xab, 0x55, 0x31, 0xdf,
+ 0x9f, 0x6c, 0x29, 0xcd, 0xc5, 0xc5, 0x58, 0xc7, 0xb1, 0x7f, 0xae, 0x00, 0xe3, 0xf2, 0x0b, 0xaa,
+ 0xed, 0xdb, 0x21, 0x89, 0xd0, 0x1a, 0x0c, 0x3b, 0x7c, 0x96, 0x88, 0x5c, 0xe4, 0x8f, 0x65, 0xcb,
+ 0x45, 0x8d, 0x29, 0x8d, 0x19, 0xe9, 0x39, 0x59, 0x1b, 0xc7, 0x84, 0x50, 0x03, 0xa6, 0x3c, 0x3f,
+ 0x62, 0x4c, 0x95, 0x82, 0x77, 0x52, 0x55, 0x27, 0xa9, 0x9f, 0x14, 0xd4, 0xa7, 0x56, 0x93, 0x54,
+ 0x70, 0x9a, 0x30, 0x5a, 0x92, 0xb2, 0xe6, 0x62, 0xbe, 0x90, 0x50, 0x9f, 0xb8, 0x6c, 0x51, 0xb3,
+ 0xfd, 0x47, 0x16, 0x0c, 0x4b, 0xb4, 0xc3, 0xb0, 0x4a, 0x58, 0x81, 0xc1, 0x90, 0x4d, 0x82, 0x1c,
+ 0x1a, 0xbb, 0x53, 0xc7, 0xf9, 0x7c, 0xc5, 0xbc, 0x22, 0xff, 0x1f, 0x62, 0x49, 0x83, 0xa9, 0x1a,
+ 0x55, 0xf7, 0xdf, 0x27, 0xaa, 0x46, 0xd5, 0x9f, 0x9c, 0x4b, 0xe9, 0x1f, 0x58, 0x9f, 0x35, 0xd9,
+ 0x3d, 0x7d, 0xd2, 0xb4, 0x02, 0xb2, 0xee, 0xde, 0x4d, 0x3e, 0x69, 0x2a, 0xac, 0x14, 0x0b, 0x28,
+ 0x7a, 0x07, 0x46, 0x6b, 0x52, 0xc7, 0x14, 0xef, 0xf0, 0x73, 0x1d, 0xf5, 0x9d, 0x4a, 0x35, 0xce,
+ 0x65, 0xa4, 0x0b, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xed, 0xc7, 0x8a, 0xdd, 0xec, 0xc7, 0x62, 0xba,
+ 0xf9, 0xd6, 0x54, 0xbf, 0x60, 0xc1, 0x00, 0xd7, 0x2d, 0xf4, 0xa6, 0xda, 0xd1, 0x2c, 0x05, 0xe2,
+ 0xb1, 0xbb, 0x49, 0x0b, 0x05, 0x67, 0x83, 0x56, 0x60, 0x98, 0xfd, 0x60, 0xba, 0x91, 0x62, 0xbe,
+ 0xb7, 0x19, 0x6f, 0x55, 0xef, 0xe0, 0x4d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x9f, 0x2e, 0xd2, 0xd3,
+ 0x2d, 0x46, 0x35, 0x2e, 0x7d, 0xeb, 0xe1, 0x5d, 0xfa, 0x85, 0x87, 0x75, 0xe9, 0x6f, 0xc0, 0x44,
+ 0x4d, 0xb3, 0x2b, 0x88, 0x67, 0xf2, 0x7c, 0xc7, 0x45, 0xa2, 0x99, 0x20, 0x70, 0xe9, 0xeb, 0x82,
+ 0x49, 0x04, 0x27, 0xa9, 0xa2, 0x4f, 0xc2, 0x28, 0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xc1, 0x7b, 0x22,
+ 0x7f, 0xbd, 0xe8, 0x4d, 0x70, 0x69, 0xbd, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0x7f, 0xb1, 0x00, 0x2d,
+ 0xb5, 0x36, 0x49, 0x93, 0x04, 0x4e, 0x23, 0x56, 0x0f, 0x7e, 0xc9, 0x82, 0x69, 0x92, 0x2a, 0x5e,
+ 0xf0, 0x9b, 0x4d, 0x21, 0x0c, 0xc8, 0x91, 0x57, 0x2d, 0xe5, 0xd4, 0x89, 0x1f, 0x04, 0x79, 0x18,
+ 0x38, 0xb7, 0x3d, 0xb4, 0x02, 0x47, 0xf8, 0x2d, 0xa9, 0x00, 0x9a, 0x95, 0xde, 0x23, 0x82, 0xf0,
+ 0x91, 0xb5, 0x34, 0x0a, 0xce, 0xaa, 0x67, 0xff, 0xd1, 0x18, 0xe4, 0xf6, 0xe2, 0x03, 0xbd, 0xe8,
+ 0x07, 0x7a, 0xd1, 0x0f, 0xf4, 0xa2, 0x1f, 0xe8, 0x45, 0x3f, 0xd0, 0x8b, 0x7e, 0xa0, 0x17, 0x7d,
+ 0x9f, 0xea, 0x45, 0x7f, 0xc6, 0x82, 0x63, 0xea, 0xfa, 0x32, 0x1e, 0xec, 0x9f, 0x83, 0x23, 0x7c,
+ 0xbb, 0x2d, 0x34, 0x1c, 0xb7, 0xb9, 0x46, 0x9a, 0xad, 0x86, 0x13, 0x49, 0xeb, 0xa7, 0x8b, 0x99,
+ 0x2b, 0x37, 0xe1, 0x62, 0x61, 0x54, 0xe4, 0xbe, 0x6a, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0xfb,
+ 0x43, 0xd0, 0xbf, 0xb4, 0x4d, 0xbc, 0xe8, 0x10, 0x9e, 0x36, 0x35, 0x18, 0x77, 0xbd, 0x6d, 0xbf,
+ 0xb1, 0x4d, 0xea, 0x1c, 0x7e, 0x90, 0x17, 0xf8, 0x71, 0x41, 0x7a, 0xbc, 0x6c, 0x90, 0xc0, 0x09,
+ 0x92, 0x0f, 0x43, 0xbb, 0x74, 0x19, 0x06, 0xf8, 0xe5, 0x23, 0x54, 0x4b, 0x99, 0x67, 0x36, 0x1b,
+ 0x44, 0x71, 0xa5, 0xc6, 0x9a, 0x2f, 0x7e, 0xb9, 0x89, 0xea, 0xe8, 0xb3, 0x30, 0xbe, 0xee, 0x06,
+ 0x61, 0xb4, 0xe6, 0x36, 0xe9, 0xd5, 0xd0, 0x6c, 0xdd, 0x87, 0x36, 0x49, 0x8d, 0xc3, 0xb2, 0x41,
+ 0x09, 0x27, 0x28, 0xa3, 0x0d, 0x18, 0x6b, 0x38, 0x7a, 0x53, 0x83, 0x07, 0x6e, 0x4a, 0xdd, 0x0e,
+ 0xd7, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0xb7, 0x53, 0x8d, 0x29, 0x44, 0x86, 0x98, 0x38, 0x43, 0x6d,
+ 0x27, 0xae, 0x09, 0xe1, 0x30, 0xca, 0xa0, 0x31, 0x47, 0x85, 0x61, 0x93, 0x41, 0xd3, 0xdc, 0x11,
+ 0x3e, 0x03, 0xc3, 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x2e, 0x98, 0x0b, 0xbd, 0xf5, 0x75, 0xc5, 0xad,
+ 0x05, 0xbe, 0xa9, 0xc7, 0x5b, 0x92, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57,
+ 0xe9, 0x0a, 0x3a, 0x4c, 0x23, 0x43, 0xe3, 0xce, 0x90, 0xfc, 0x37, 0x16, 0x55, 0xe9, 0xf2, 0x72,
+ 0x98, 0x28, 0x96, 0x5d, 0x06, 0xda, 0xf2, 0x9a, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x9b, 0x30, 0x18,
+ 0x90, 0x06, 0x53, 0x14, 0x8f, 0xf5, 0xbe, 0xc8, 0xb9, 0xde, 0x99, 0xd7, 0xc3, 0x92, 0x00, 0xba,
+ 0x0a, 0x28, 0x20, 0x94, 0xc1, 0x73, 0xbd, 0x0d, 0x65, 0xbe, 0x2f, 0x0e, 0x5a, 0xc5, 0x48, 0xe3,
+ 0x18, 0x43, 0xfa, 0xc1, 0xe2, 0x8c, 0x6a, 0xe8, 0x32, 0x4c, 0xa9, 0xd2, 0xb2, 0x17, 0x46, 0x0e,
+ 0x3d, 0xe0, 0xb8, 0xb8, 0x5e, 0xc9, 0x57, 0x70, 0x12, 0x01, 0xa7, 0xeb, 0xd8, 0xbf, 0x61, 0x01,
+ 0x1f, 0xe7, 0x43, 0x90, 0x2a, 0xbc, 0x6e, 0x4a, 0x15, 0x4e, 0xe6, 0xce, 0x5c, 0x8e, 0x44, 0xe1,
+ 0x37, 0x2c, 0x18, 0xd1, 0x66, 0x36, 0x5e, 0xb3, 0x56, 0x87, 0x35, 0xdb, 0x86, 0x49, 0xba, 0xd2,
+ 0xaf, 0xdf, 0x0e, 0x49, 0xb0, 0x4d, 0xea, 0x6c, 0x61, 0x16, 0xee, 0x6f, 0x61, 0x2a, 0x53, 0xe1,
+ 0x6b, 0x09, 0x82, 0x38, 0xd5, 0x84, 0xfd, 0x19, 0xd9, 0x55, 0x65, 0x59, 0x5d, 0x53, 0x73, 0x9e,
+ 0xb0, 0xac, 0x56, 0xb3, 0x8a, 0x63, 0x1c, 0xba, 0xd5, 0x36, 0xfd, 0x30, 0x4a, 0x5a, 0x56, 0x5f,
+ 0xf1, 0xc3, 0x08, 0x33, 0x88, 0xfd, 0x02, 0xc0, 0xd2, 0x5d, 0x52, 0xe3, 0x2b, 0x56, 0x7f, 0xf4,
+ 0x58, 0xf9, 0x8f, 0x1e, 0xfb, 0x6f, 0x2c, 0x18, 0x5f, 0x5e, 0x30, 0x6e, 0xae, 0x59, 0x00, 0xfe,
+ 0x52, 0xbb, 0x75, 0x6b, 0x55, 0x9a, 0xf7, 0x70, 0x0b, 0x07, 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x12,
+ 0x8a, 0x8d, 0xb6, 0x27, 0xc4, 0x9e, 0x83, 0xf4, 0x7a, 0xbc, 0xd6, 0xf6, 0x30, 0x2d, 0xd3, 0x7c,
+ 0xe0, 0x8a, 0x3d, 0xfb, 0xc0, 0x75, 0x0d, 0xc5, 0x83, 0x4a, 0xd0, 0x7f, 0xe7, 0x8e, 0x5b, 0xe7,
+ 0x11, 0x06, 0x84, 0xe9, 0xd1, 0xad, 0x5b, 0xe5, 0xc5, 0x10, 0xf3, 0x72, 0xfb, 0xcb, 0x45, 0x98,
+ 0x59, 0x6e, 0x90, 0xbb, 0xef, 0x31, 0xca, 0x42, 0xaf, 0x1e, 0x7c, 0x07, 0x13, 0x20, 0x1d, 0xd4,
+ 0x4b, 0xb3, 0xfb, 0x78, 0xac, 0xc3, 0x20, 0x37, 0x2c, 0x96, 0x31, 0x17, 0x32, 0xd5, 0xb9, 0xf9,
+ 0x03, 0x32, 0xcb, 0x0d, 0x94, 0x85, 0x3a, 0x57, 0x5d, 0x98, 0xa2, 0x14, 0x4b, 0xe2, 0x33, 0xaf,
+ 0xc0, 0xa8, 0x8e, 0x79, 0x20, 0x7f, 0xe9, 0x1f, 0x2e, 0xc2, 0x24, 0xed, 0xc1, 0x43, 0x9d, 0x88,
+ 0x1b, 0xe9, 0x89, 0x78, 0xd0, 0x3e, 0xb3, 0xdd, 0x67, 0xe3, 0x9d, 0xe4, 0x6c, 0x5c, 0xcc, 0x9b,
+ 0x8d, 0xc3, 0x9e, 0x83, 0x1f, 0xb1, 0xe0, 0xc8, 0x72, 0xc3, 0xaf, 0x6d, 0x25, 0xfc, 0x5a, 0x5f,
+ 0x82, 0x11, 0x7a, 0x1c, 0x87, 0x46, 0x88, 0x17, 0x23, 0xe8, 0x8f, 0x00, 0x61, 0x1d, 0x4f, 0xab,
+ 0x76, 0xe3, 0x46, 0x79, 0x31, 0x2b, 0x56, 0x90, 0x00, 0x61, 0x1d, 0xcf, 0xfe, 0x4b, 0x0b, 0x4e,
+ 0x5f, 0x5e, 0x58, 0x8a, 0x97, 0x62, 0x2a, 0x5c, 0xd1, 0x39, 0x18, 0x68, 0xd5, 0xb5, 0xae, 0xc4,
+ 0x62, 0xe1, 0x45, 0xd6, 0x0b, 0x01, 0x7d, 0xbf, 0x44, 0x06, 0xbb, 0x01, 0x70, 0x19, 0x57, 0x16,
+ 0xc4, 0xb9, 0x2b, 0xb5, 0x40, 0x56, 0xae, 0x16, 0xe8, 0x09, 0x18, 0xa4, 0xf7, 0x82, 0x5b, 0x93,
+ 0xfd, 0xe6, 0x06, 0x1b, 0xbc, 0x08, 0x4b, 0x98, 0xfd, 0xeb, 0x16, 0x1c, 0xb9, 0xec, 0x46, 0xf4,
+ 0xd2, 0x4e, 0xc6, 0xe3, 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x93, 0x8c, 0xc7, 0x83, 0x15,
+ 0x04, 0x6b, 0x58, 0xfc, 0x83, 0xb6, 0x5d, 0xe6, 0x29, 0x53, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63,
+ 0x85, 0x41, 0xc7, 0xab, 0xee, 0x06, 0x4c, 0x64, 0xb9, 0x23, 0x0e, 0x6e, 0x35, 0x5e, 0x8b, 0x12,
+ 0x80, 0x63, 0x1c, 0xfb, 0x9f, 0x2c, 0x28, 0x5d, 0xe6, 0xfe, 0xbe, 0xeb, 0x61, 0xce, 0xa1, 0xfb,
+ 0x02, 0x0c, 0x13, 0xa9, 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xb0, 0x40, 0x0a,
+ 0xaf, 0x07, 0xe7, 0xfb, 0x83, 0x79, 0x4f, 0x2f, 0x03, 0x22, 0x7a, 0x5b, 0x7a, 0x9c, 0x24, 0x16,
+ 0x70, 0x65, 0x29, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb7, 0xe0, 0x98, 0xfa, 0xe0, 0xf7, 0xdd,
+ 0x67, 0xda, 0xbf, 0x53, 0x80, 0xb1, 0x2b, 0x6b, 0x6b, 0x95, 0xcb, 0x24, 0xd2, 0x56, 0x65, 0x67,
+ 0xb5, 0x3f, 0xd6, 0xb4, 0x97, 0x9d, 0xde, 0x88, 0xed, 0xc8, 0x6d, 0xcc, 0xf2, 0xe8, 0x7f, 0xb3,
+ 0x65, 0x2f, 0xba, 0x1e, 0x54, 0xa3, 0xc0, 0xf5, 0x36, 0x32, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79,
+ 0x3c, 0x0b, 0x7a, 0x01, 0x06, 0x58, 0xf8, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0xdd,
+ 0xdf, 0x2d, 0x0d, 0xdf, 0xc0, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x03, 0x46, 0x36, 0xa3, 0xa8,
+ 0x75, 0x85, 0x38, 0x75, 0x12, 0xc8, 0x53, 0xf6, 0x4c, 0xd6, 0x29, 0x4b, 0x07, 0x81, 0xa3, 0xc5,
+ 0x07, 0x53, 0x5c, 0x16, 0x62, 0x9d, 0x8e, 0x5d, 0x05, 0x88, 0x61, 0x0f, 0x48, 0x71, 0x63, 0xaf,
+ 0xc1, 0x30, 0xfd, 0xdc, 0xb9, 0x86, 0xeb, 0x74, 0x56, 0x8d, 0x3f, 0x03, 0xc3, 0x52, 0xf1, 0x1d,
+ 0x8a, 0xe0, 0x20, 0xec, 0x46, 0x92, 0x7a, 0xf1, 0x10, 0xc7, 0x70, 0xfb, 0x71, 0x10, 0xb6, 0xc3,
+ 0x9d, 0x48, 0xda, 0xeb, 0x70, 0x94, 0x19, 0x41, 0x3b, 0xd1, 0xa6, 0xb1, 0x46, 0xbb, 0x2f, 0x86,
+ 0x67, 0xc5, 0xbb, 0xae, 0xa0, 0xec, 0x7d, 0xa4, 0xf3, 0xf9, 0xa8, 0xa4, 0x18, 0xbf, 0xf1, 0xec,
+ 0x7f, 0xec, 0x83, 0x47, 0xca, 0xd5, 0xfc, 0xa8, 0x56, 0x97, 0x60, 0x94, 0xb3, 0x8b, 0x74, 0x69,
+ 0x38, 0x0d, 0xd1, 0xae, 0x92, 0x80, 0xae, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0xd3, 0x50, 0x74, 0xdf,
+ 0xf5, 0x92, 0xae, 0x99, 0xe5, 0xb7, 0x56, 0x31, 0x2d, 0xa7, 0x60, 0xca, 0x79, 0xf2, 0x23, 0x5d,
+ 0x81, 0x15, 0xf7, 0xf9, 0x3a, 0x8c, 0xbb, 0x61, 0x2d, 0x74, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e,
+ 0x57, 0x32, 0x07, 0xda, 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xef, 0x99, 0x7b, 0xed,
+ 0x1a, 0x53, 0x83, 0x1e, 0xff, 0x2d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70,
+ 0x88, 0x25, 0x8c, 0x3e, 0xe8, 0x6a, 0x9b, 0x4e, 0x6b, 0xae, 0x1d, 0x6d, 0x2e, 0xba, 0x61, 0xcd,
+ 0xdf, 0x26, 0xc1, 0x0e, 0x7b, 0x8b, 0x0f, 0xc5, 0x0f, 0x3a, 0x05, 0x58, 0xb8, 0x32, 0x57, 0xa1,
+ 0x98, 0x38, 0x5d, 0x07, 0xcd, 0xc1, 0x84, 0x2c, 0xac, 0x92, 0x90, 0x5d, 0x01, 0x23, 0x8c, 0x8c,
+ 0x72, 0x96, 0x14, 0xc5, 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3,
+ 0x98, 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x97, 0x75,
+ 0x00, 0x36, 0xf1, 0xec, 0xff, 0xb3, 0x0f, 0xa6, 0xd8, 0xb4, 0x7d, 0xb0, 0xc2, 0xbe, 0x97, 0x56,
+ 0xd8, 0x8d, 0xf4, 0x0a, 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc5, 0x64,
+ 0xdc, 0xc6, 0x32, 0xbb, 0x00, 0xc3, 0x81, 0xe1, 0xc7, 0x3a, 0xac, 0x2b, 0xb5, 0xa4, 0x4b, 0x6a,
+ 0x8c, 0x83, 0xde, 0x00, 0x68, 0xc5, 0x32, 0xf4, 0x82, 0x11, 0x7c, 0x14, 0x72, 0xc5, 0xe7, 0x5a,
+ 0x1d, 0xfb, 0xb3, 0x30, 0xac, 0x1c, 0x55, 0xa5, 0xa7, 0xba, 0x95, 0xe3, 0xa9, 0xde, 0x9d, 0x8d,
+ 0x90, 0xb6, 0x71, 0xc5, 0x4c, 0xdb, 0xb8, 0xff, 0xcb, 0x82, 0x58, 0xc3, 0x81, 0xde, 0x82, 0xe1,
+ 0x96, 0xcf, 0x4c, 0xa9, 0x03, 0xe9, 0x9f, 0xf0, 0x78, 0x47, 0x15, 0x09, 0x8f, 0x30, 0x18, 0xf0,
+ 0xe9, 0xa8, 0xc8, 0xaa, 0x38, 0xa6, 0x82, 0xae, 0xc2, 0x60, 0x2b, 0x20, 0xd5, 0x88, 0x85, 0xbf,
+ 0xea, 0x9d, 0x20, 0x5f, 0xbe, 0xbc, 0x22, 0x96, 0x14, 0x12, 0x96, 0xa9, 0xc5, 0xde, 0x2d, 0x53,
+ 0xed, 0xdf, 0x2a, 0xc0, 0x64, 0xb2, 0x11, 0xf4, 0x1a, 0xf4, 0x91, 0xbb, 0xa4, 0x26, 0xbe, 0x34,
+ 0x93, 0x9b, 0x88, 0xa5, 0x2b, 0x7c, 0xe8, 0xe8, 0x7f, 0xcc, 0x6a, 0xa1, 0x2b, 0x30, 0x48, 0x59,
+ 0x89, 0xcb, 0x2a, 0x48, 0xe4, 0xa3, 0x79, 0xec, 0x88, 0xe2, 0xc9, 0xf8, 0x67, 0x89, 0x22, 0x2c,
+ 0xab, 0x33, 0x53, 0xb6, 0x5a, 0xab, 0x4a, 0x5f, 0x69, 0x51, 0x27, 0x61, 0xc2, 0xda, 0x42, 0x85,
+ 0x23, 0x09, 0x6a, 0xdc, 0x94, 0x4d, 0x16, 0xe2, 0x98, 0x08, 0x7a, 0x03, 0xfa, 0xc3, 0x06, 0x21,
+ 0x2d, 0x61, 0xab, 0x90, 0x29, 0x1f, 0xad, 0x52, 0x04, 0x41, 0x89, 0xc9, 0x53, 0x58, 0x01, 0xe6,
+ 0x15, 0xed, 0xdf, 0xb5, 0x00, 0xb8, 0xed, 0x9f, 0xe3, 0x6d, 0x90, 0x43, 0x50, 0x29, 0x2c, 0x42,
+ 0x5f, 0xd8, 0x22, 0xb5, 0x4e, 0x1e, 0x06, 0x71, 0x7f, 0xaa, 0x2d, 0x52, 0x8b, 0x57, 0x3b, 0xfd,
+ 0x87, 0x59, 0x6d, 0xfb, 0x47, 0x01, 0xc6, 0x63, 0xb4, 0x72, 0x44, 0x9a, 0xe8, 0x39, 0x23, 0xb2,
+ 0xce, 0xc9, 0x44, 0x64, 0x9d, 0x61, 0x86, 0xad, 0x49, 0xaf, 0x3f, 0x0b, 0xc5, 0xa6, 0x73, 0x57,
+ 0x88, 0x27, 0x9f, 0xe9, 0xdc, 0x0d, 0x4a, 0x7f, 0x76, 0xc5, 0xb9, 0xcb, 0x5f, 0xf0, 0xcf, 0xc8,
+ 0xdd, 0xb9, 0xe2, 0xdc, 0xed, 0x6a, 0x05, 0x4f, 0x1b, 0x61, 0x6d, 0xb9, 0x9e, 0x30, 0x6b, 0xeb,
+ 0xa9, 0x2d, 0xd7, 0x4b, 0xb6, 0xe5, 0x7a, 0x3d, 0xb4, 0xe5, 0x7a, 0xe8, 0x1e, 0x0c, 0x0a, 0xab,
+ 0x53, 0x11, 0xf2, 0xef, 0x42, 0x0f, 0xed, 0x09, 0xa3, 0x55, 0xde, 0xe6, 0x05, 0x29, 0xa1, 0x10,
+ 0xa5, 0x5d, 0xdb, 0x95, 0x0d, 0xa2, 0xff, 0xd4, 0x82, 0x71, 0xf1, 0x1b, 0x93, 0x77, 0xdb, 0x24,
+ 0x8c, 0x04, 0x07, 0xff, 0x91, 0xde, 0xfb, 0x20, 0x2a, 0xf2, 0xae, 0x7c, 0x44, 0x5e, 0xb6, 0x26,
+ 0xb0, 0x6b, 0x8f, 0x12, 0xbd, 0x40, 0xbf, 0x65, 0xc1, 0xd1, 0xa6, 0x73, 0x97, 0xb7, 0xc8, 0xcb,
+ 0xb0, 0x13, 0xb9, 0xbe, 0xb0, 0xde, 0x78, 0xad, 0xb7, 0xe9, 0x4f, 0x55, 0xe7, 0x9d, 0x94, 0xaa,
+ 0xda, 0xa3, 0x59, 0x28, 0x5d, 0xbb, 0x9a, 0xd9, 0xaf, 0x99, 0x75, 0x18, 0x92, 0xeb, 0xed, 0x61,
+ 0x9a, 0xd4, 0xb3, 0x76, 0xc4, 0x5a, 0x7b, 0xa8, 0xed, 0x7c, 0x16, 0x46, 0xf5, 0x35, 0xf6, 0x50,
+ 0xdb, 0x7a, 0x17, 0x8e, 0x64, 0xac, 0xa5, 0x87, 0xda, 0xe4, 0x1d, 0x38, 0x99, 0xbb, 0x3e, 0x1e,
+ 0xaa, 0x4b, 0xc4, 0xef, 0x58, 0xfa, 0x39, 0x78, 0x08, 0x7a, 0x9d, 0x05, 0x53, 0xaf, 0x73, 0xa6,
+ 0xf3, 0xce, 0xc9, 0x51, 0xee, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x68, 0xd0,
+ 0x12, 0x69, 0xbb, 0x6c, 0x77, 0xdf, 0x91, 0x31, 0x47, 0xcd, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0x5f,
+ 0xb1, 0x20, 0xc3, 0xa9, 0x83, 0x72, 0x58, 0x6d, 0xb7, 0xce, 0x86, 0xa4, 0x18, 0x73, 0x58, 0x2a,
+ 0xf0, 0xcc, 0x69, 0x28, 0x6e, 0xb8, 0x75, 0xe1, 0xcd, 0xac, 0xc0, 0x97, 0x29, 0x78, 0xc3, 0xad,
+ 0xa3, 0x65, 0x40, 0x61, 0xbb, 0xd5, 0x6a, 0x30, 0x83, 0x27, 0xa7, 0x71, 0x39, 0xf0, 0xdb, 0x2d,
+ 0x6e, 0xa8, 0x5c, 0xe4, 0xe2, 0xa5, 0x6a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, 0x7f, 0x60, 0x41, 0xdf,
+ 0x21, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x53, 0xc4, 0x2c, 0x76, 0xee, 0x2c,
+ 0xdd, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0x76, 0x2d, 0x38, 0x72, 0xcd, 0x77, 0xea,
+ 0xf3, 0x4e, 0xc3, 0xf1, 0x6a, 0x24, 0x28, 0x7b, 0x1b, 0x07, 0xf2, 0x0a, 0x28, 0x74, 0xf5, 0x0a,
+ 0xb8, 0x04, 0x03, 0x6e, 0x4b, 0x0b, 0x35, 0x7f, 0x96, 0xce, 0x6e, 0xb9, 0x22, 0xa2, 0xcc, 0x23,
+ 0xa3, 0x71, 0x56, 0x8a, 0x05, 0x3e, 0x5d, 0x96, 0xdc, 0x1c, 0xaf, 0x2f, 0x7f, 0x59, 0xd2, 0x57,
+ 0x52, 0x32, 0x84, 0x9a, 0x61, 0x38, 0xbe, 0x09, 0x46, 0x13, 0xc2, 0x4d, 0x0a, 0xc3, 0xa0, 0xcb,
+ 0xbf, 0x54, 0xac, 0xcd, 0x27, 0xb3, 0x5f, 0x2f, 0xa9, 0x81, 0xd1, 0xfc, 0x01, 0x79, 0x01, 0x96,
+ 0x84, 0xec, 0x4b, 0x90, 0x19, 0xf2, 0xa6, 0xbb, 0x64, 0xca, 0xfe, 0x04, 0x4c, 0xb1, 0x9a, 0x07,
+ 0x94, 0xfa, 0xd8, 0x09, 0x79, 0x7a, 0x46, 0xd4, 0x60, 0xfb, 0x7f, 0xb5, 0x00, 0xad, 0xf8, 0x75,
+ 0x77, 0x7d, 0x47, 0x10, 0xe7, 0xdf, 0xff, 0x2e, 0x94, 0xf8, 0xb3, 0x3a, 0x19, 0x59, 0x77, 0xa1,
+ 0xe1, 0x84, 0xa1, 0x26, 0xcb, 0x7f, 0x52, 0xb4, 0x5b, 0x5a, 0xeb, 0x8c, 0x8e, 0xbb, 0xd1, 0x43,
+ 0x6f, 0x25, 0x02, 0x1d, 0x7e, 0x34, 0x15, 0xe8, 0xf0, 0xc9, 0x4c, 0x8b, 0x9a, 0x74, 0xef, 0x65,
+ 0x00, 0x44, 0xfb, 0x8b, 0x16, 0x4c, 0xac, 0x26, 0x22, 0xc5, 0x9e, 0x63, 0xe6, 0x05, 0x19, 0x3a,
+ 0xaa, 0x2a, 0x2b, 0xc5, 0x02, 0xfa, 0xc0, 0x65, 0xb8, 0xdf, 0xb6, 0x20, 0x0e, 0xb1, 0x75, 0x08,
+ 0x2c, 0xf7, 0x82, 0xc1, 0x72, 0x67, 0x3e, 0x5f, 0x54, 0x77, 0xf2, 0x38, 0x6e, 0x74, 0x55, 0xcd,
+ 0x49, 0x87, 0x97, 0x4b, 0x4c, 0x86, 0xef, 0xb3, 0x71, 0x73, 0xe2, 0xd4, 0x6c, 0x7c, 0xa3, 0x00,
+ 0x48, 0xe1, 0xf6, 0x1c, 0x1c, 0x33, 0x5d, 0xe3, 0xc1, 0x04, 0xc7, 0xdc, 0x06, 0xc4, 0x0c, 0x64,
+ 0x02, 0xc7, 0x0b, 0x39, 0x59, 0x57, 0x48, 0xad, 0x0f, 0x66, 0x7d, 0x33, 0x23, 0xbd, 0x65, 0xaf,
+ 0xa5, 0xa8, 0xe1, 0x8c, 0x16, 0x34, 0xc3, 0xa7, 0xfe, 0x5e, 0x0d, 0x9f, 0x06, 0xba, 0xb8, 0x7d,
+ 0x7f, 0xcd, 0x82, 0x31, 0x35, 0x4c, 0xef, 0x13, 0xe7, 0x11, 0xd5, 0x9f, 0x9c, 0x7b, 0xa5, 0xa2,
+ 0x75, 0x99, 0x31, 0x03, 0xdf, 0xc7, 0xdc, 0xf7, 0x9d, 0x86, 0x7b, 0x8f, 0xa8, 0x18, 0xce, 0x25,
+ 0xe1, 0x8e, 0x2f, 0x4a, 0xf7, 0x77, 0x4b, 0x63, 0xea, 0x1f, 0x8f, 0x1a, 0x1b, 0x57, 0xb1, 0x7f,
+ 0x99, 0x6e, 0x76, 0x73, 0x29, 0xa2, 0x97, 0xa0, 0xbf, 0xb5, 0xe9, 0x84, 0x24, 0xe1, 0x64, 0xd7,
+ 0x5f, 0xa1, 0x85, 0xfb, 0xbb, 0xa5, 0x71, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7b, 0xc8, 0xd1,
+ 0xf4, 0xe2, 0xec, 0x1a, 0x72, 0xf4, 0x5f, 0x2c, 0xe8, 0x5b, 0xa5, 0xb7, 0xd7, 0xc3, 0x3f, 0x02,
+ 0x5e, 0x37, 0x8e, 0x80, 0x53, 0x79, 0xd9, 0x8c, 0x72, 0x77, 0xff, 0x72, 0x62, 0xf7, 0x9f, 0xc9,
+ 0xa5, 0xd0, 0x79, 0xe3, 0x37, 0x61, 0x84, 0xe5, 0x48, 0x12, 0x0e, 0x85, 0x2f, 0x18, 0x1b, 0xbe,
+ 0x94, 0xd8, 0xf0, 0x13, 0x1a, 0xaa, 0xb6, 0xd3, 0x9f, 0x82, 0x41, 0xe1, 0xa1, 0x96, 0x8c, 0x82,
+ 0x20, 0x70, 0xb1, 0x84, 0xdb, 0xbf, 0x50, 0x04, 0x23, 0x27, 0x13, 0xfa, 0x23, 0x0b, 0x66, 0x03,
+ 0x6e, 0xb9, 0x5e, 0x5f, 0x6c, 0x07, 0xae, 0xb7, 0x51, 0xad, 0x6d, 0x92, 0x7a, 0xbb, 0xe1, 0x7a,
+ 0x1b, 0xe5, 0x0d, 0xcf, 0x57, 0xc5, 0x4b, 0x77, 0x49, 0xad, 0xcd, 0xb4, 0xca, 0x5d, 0x12, 0x40,
+ 0x29, 0x0f, 0x90, 0xe7, 0xf7, 0x76, 0x4b, 0xb3, 0xf8, 0x40, 0xb4, 0xf1, 0x01, 0xfb, 0x82, 0xfe,
+ 0xd2, 0x82, 0x0b, 0x3c, 0x37, 0x50, 0xef, 0xfd, 0xef, 0x20, 0xe1, 0xa8, 0x48, 0x52, 0x31, 0x91,
+ 0x35, 0x12, 0x34, 0xe7, 0x5f, 0x16, 0x03, 0x7a, 0xa1, 0x72, 0xb0, 0xb6, 0xf0, 0x41, 0x3b, 0x67,
+ 0xff, 0xb7, 0x45, 0x18, 0x13, 0xa1, 0x29, 0xc5, 0x1d, 0xf0, 0x92, 0xb1, 0x24, 0x1e, 0x4d, 0x2c,
+ 0x89, 0x29, 0x03, 0xf9, 0xc1, 0x1c, 0xff, 0x21, 0x4c, 0xd1, 0xc3, 0xf9, 0x0a, 0x71, 0x82, 0xe8,
+ 0x36, 0x71, 0xb8, 0x3d, 0x63, 0xf1, 0xc0, 0xa7, 0xbf, 0x12, 0xac, 0x5f, 0x4b, 0x12, 0xc3, 0x69,
+ 0xfa, 0xdf, 0x4b, 0x77, 0x8e, 0x07, 0x93, 0xa9, 0xe8, 0xa2, 0x6f, 0xc3, 0xb0, 0x72, 0xaf, 0x12,
+ 0x87, 0x4e, 0xe7, 0x20, 0xbd, 0x49, 0x0a, 0x5c, 0xe8, 0x19, 0xbb, 0xf6, 0xc5, 0xe4, 0xec, 0xdf,
+ 0x2e, 0x18, 0x0d, 0xf2, 0x49, 0x5c, 0x85, 0x21, 0x27, 0x64, 0x81, 0xc3, 0xeb, 0x9d, 0x24, 0xda,
+ 0xa9, 0x66, 0x98, 0x8b, 0xdb, 0x9c, 0xa8, 0x89, 0x15, 0x0d, 0x74, 0x85, 0x5b, 0x8d, 0x6e, 0x93,
+ 0x4e, 0xe2, 0xec, 0x14, 0x35, 0x90, 0x76, 0xa5, 0xdb, 0x04, 0x8b, 0xfa, 0xe8, 0x53, 0xdc, 0xac,
+ 0xf7, 0xaa, 0xe7, 0xdf, 0xf1, 0x2e, 0xfb, 0xbe, 0x0c, 0x43, 0xd4, 0x1b, 0xc1, 0x29, 0x69, 0xcc,
+ 0xab, 0xaa, 0x63, 0x93, 0x5a, 0x6f, 0xe1, 0xba, 0x3f, 0x07, 0x2c, 0x17, 0x8a, 0x19, 0xcd, 0x20,
+ 0x44, 0x04, 0x26, 0x44, 0xdc, 0x53, 0x59, 0x26, 0xc6, 0x2e, 0xf3, 0xf9, 0x6d, 0xd6, 0x8e, 0x35,
+ 0x40, 0x57, 0x4d, 0x12, 0x38, 0x49, 0xd3, 0xde, 0xe4, 0x87, 0xf0, 0x32, 0x71, 0xa2, 0x76, 0x40,
+ 0x42, 0xf4, 0x71, 0x98, 0x4e, 0xbf, 0x8c, 0x85, 0x22, 0xc5, 0x62, 0xdc, 0xf3, 0xa9, 0xbd, 0xdd,
+ 0xd2, 0x74, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0xb6, 0x7f, 0xcd, 0x02, 0xe6, 0x43, 0x7e, 0x08, 0x9c,
+ 0xcf, 0xc7, 0x4c, 0xce, 0x67, 0x3a, 0x6f, 0x3a, 0x73, 0x98, 0x9e, 0x17, 0xf9, 0x1a, 0xae, 0x04,
+ 0xfe, 0xdd, 0x1d, 0x61, 0xf5, 0xd5, 0xfd, 0x19, 0x67, 0x7f, 0xd9, 0x02, 0x96, 0x38, 0x08, 0xf3,
+ 0x57, 0xbb, 0x54, 0x70, 0x74, 0x37, 0x68, 0xf8, 0x38, 0x0c, 0xad, 0x8b, 0xe1, 0xcf, 0x10, 0x3a,
+ 0x19, 0x1d, 0x36, 0x69, 0xcb, 0x49, 0x13, 0xbe, 0xa0, 0xe2, 0x1f, 0x56, 0xd4, 0xec, 0xff, 0xd2,
+ 0x82, 0x99, 0xfc, 0x6a, 0xe8, 0x06, 0x9c, 0x08, 0x48, 0xad, 0x1d, 0x84, 0x74, 0x4b, 0x88, 0x07,
+ 0x90, 0x70, 0xa7, 0xe2, 0x53, 0xfd, 0xc8, 0xde, 0x6e, 0xe9, 0x04, 0xce, 0x46, 0xc1, 0x79, 0x75,
+ 0xd1, 0x2b, 0x30, 0xde, 0x0e, 0x39, 0xe7, 0xc7, 0x98, 0xae, 0x50, 0x44, 0xa7, 0x66, 0x1e, 0x47,
+ 0x37, 0x0c, 0x08, 0x4e, 0x60, 0xda, 0x3f, 0xc0, 0x97, 0xa3, 0x0a, 0x50, 0xdd, 0x84, 0x29, 0x4f,
+ 0xfb, 0x4f, 0x6f, 0x40, 0xf9, 0xd4, 0x7f, 0xbc, 0xdb, 0xad, 0xcf, 0xae, 0x4b, 0xcd, 0xcb, 0x3d,
+ 0x41, 0x06, 0xa7, 0x29, 0xdb, 0xbf, 0x68, 0xc1, 0x09, 0x1d, 0x51, 0x73, 0xa4, 0xeb, 0xa6, 0x05,
+ 0x5c, 0x84, 0x21, 0xbf, 0x45, 0x02, 0x27, 0xf2, 0x03, 0x71, 0xcd, 0x9d, 0x97, 0x2b, 0xf4, 0xba,
+ 0x28, 0xdf, 0x17, 0x09, 0x73, 0x24, 0x75, 0x59, 0x8e, 0x55, 0x4d, 0x64, 0xc3, 0x00, 0x13, 0x20,
+ 0x86, 0xc2, 0x65, 0x92, 0x1d, 0x5a, 0xcc, 0xb2, 0x25, 0xc4, 0x02, 0x62, 0xff, 0xa3, 0xc5, 0xd7,
+ 0xa7, 0xde, 0x75, 0xf4, 0x2e, 0x4c, 0x36, 0x9d, 0xa8, 0xb6, 0xb9, 0x74, 0xb7, 0x15, 0x70, 0xe5,
+ 0xae, 0x1c, 0xa7, 0x67, 0xba, 0x8d, 0x93, 0xf6, 0x91, 0xb1, 0x69, 0xf5, 0x4a, 0x82, 0x18, 0x4e,
+ 0x91, 0x47, 0xb7, 0x61, 0x84, 0x95, 0x31, 0x6f, 0xe0, 0xb0, 0x13, 0x2f, 0x93, 0xd7, 0x9a, 0x32,
+ 0x0e, 0x5a, 0x89, 0xe9, 0x60, 0x9d, 0xa8, 0xfd, 0xd5, 0x22, 0x3f, 0x34, 0xd8, 0xdb, 0xe3, 0x29,
+ 0x18, 0x6c, 0xf9, 0xf5, 0x85, 0xf2, 0x22, 0x16, 0xb3, 0xa0, 0xee, 0xbd, 0x0a, 0x2f, 0xc6, 0x12,
+ 0x8e, 0xce, 0xc3, 0x90, 0xf8, 0x29, 0x95, 0xf1, 0x6c, 0x8f, 0x08, 0xbc, 0x10, 0x2b, 0x28, 0x7a,
+ 0x1e, 0xa0, 0x15, 0xf8, 0xdb, 0x6e, 0x9d, 0x45, 0x7f, 0x2a, 0x9a, 0x76, 0x7d, 0x15, 0x05, 0xc1,
+ 0x1a, 0x16, 0x7a, 0x15, 0xc6, 0xda, 0x5e, 0xc8, 0xf9, 0x27, 0x2d, 0xc6, 0xbe, 0xb2, 0x38, 0xbb,
+ 0xa1, 0x03, 0xb1, 0x89, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98, 0x9d, 0x5a, 0x7f, 0xbe, 0xf9, 0xfd,
+ 0x1a, 0xc5, 0xd0, 0xb3, 0xd9, 0xd1, 0x0a, 0x58, 0x54, 0x44, 0x6f, 0x4b, 0xc7, 0x7c, 0x7e, 0x13,
+ 0x09, 0xbf, 0x97, 0xde, 0x6e, 0x2d, 0xcd, 0x2d, 0x5f, 0xf8, 0xd3, 0x18, 0xb4, 0xd0, 0x2b, 0x00,
+ 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0x69, 0x28, 0xeb, 0x52, 0xc5, 0xc8, 0x2c, 0xfa, 0xab, 0x7e, 0x74,
+ 0x23, 0x24, 0x4b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x8e, 0x00, 0xc4, 0x0f, 0x0d, 0x74, 0x0f,
+ 0x86, 0x6a, 0x4e, 0xcb, 0xa9, 0xf1, 0x54, 0xad, 0xc5, 0x3c, 0x7f, 0xe9, 0xb8, 0xc6, 0xec, 0x82,
+ 0x40, 0xe7, 0xca, 0x1b, 0x19, 0xa6, 0x7c, 0x48, 0x16, 0x77, 0x55, 0xd8, 0xa8, 0xf6, 0xd0, 0x17,
+ 0x2c, 0x18, 0x11, 0xd1, 0x95, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, 0xfb, 0x73, 0x71, 0x0d,
+ 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x5d, 0x7b, 0xa1, 0x37, 0x8c, 0x3e, 0x2c, 0xdf, 0xb6,
+ 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x98, 0x5d, 0x35, 0xfa, 0xb3, 0xf6, 0x86, 0xf1, 0xac, 0xed,
+ 0xcb, 0xf7, 0x3c, 0x36, 0xf8, 0xed, 0x6e, 0x2f, 0x5a, 0x54, 0xd1, 0xa3, 0x90, 0xf4, 0xe7, 0xbb,
+ 0xcb, 0x6a, 0x0f, 0xbb, 0x2e, 0x11, 0x48, 0x3e, 0x0b, 0x13, 0x75, 0x93, 0x6b, 0x11, 0x2b, 0xf1,
+ 0xc9, 0x3c, 0xba, 0x09, 0x26, 0x27, 0xe6, 0x53, 0x12, 0x00, 0x9c, 0x24, 0x8c, 0x2a, 0x3c, 0x28,
+ 0x4d, 0xd9, 0x5b, 0xf7, 0x85, 0xef, 0x95, 0x9d, 0x3b, 0x97, 0x3b, 0x61, 0x44, 0x9a, 0x14, 0x33,
+ 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0x61, 0x80, 0xf9, 0x4b, 0x86, 0xd3, 0x43,
+ 0xf9, 0x6a, 0x0d, 0x33, 0xfa, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, 0x0a, 0xe8, 0x8a, 0xf4,
+ 0x46, 0x0e, 0xcb, 0xde, 0x8d, 0x90, 0x30, 0x6f, 0xe4, 0xe1, 0xf9, 0xc7, 0x63, 0x47, 0x63, 0x5e,
+ 0x9e, 0x99, 0xf3, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xd2, 0x15, 0xb1, 0xe2, 0x32,
+ 0xbb, 0x67, 0xa6, 0xdb, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69, 0x52, 0x16, 0x9a, 0xef,
+ 0x7a, 0xe1, 0xbd, 0xd5, 0xed, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1, 0x12, 0x2c, 0xea, 0x23,
+ 0x17, 0x26, 0x02, 0x83, 0xbd, 0x90, 0x21, 0xde, 0xce, 0xf5, 0xc6, 0xc4, 0x68, 0xc9, 0x03, 0x4c,
+ 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x63, 0x9d, 0x5f, 0xfe, 0xdd, 0x58, 0xa3, 0x99,
+ 0x2d, 0x18, 0x33, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x32, 0x79, 0xb2, 0x3c, 0x54, 0xcd,
+ 0xe3, 0x2b, 0x30, 0xce, 0x36, 0xc2, 0x1d, 0xa7, 0x25, 0x8e, 0xe2, 0xf3, 0xc6, 0x51, 0x6c, 0x9d,
+ 0x2f, 0xf2, 0x81, 0x91, 0x43, 0x10, 0x1f, 0x9c, 0xf6, 0xaf, 0xf4, 0x8b, 0xca, 0x6a, 0x17, 0xa1,
+ 0x0b, 0x30, 0x2c, 0x3a, 0xa0, 0x32, 0x70, 0xa9, 0x83, 0x61, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12,
+ 0xaf, 0xb1, 0xea, 0x9a, 0x87, 0x42, 0x9c, 0x78, 0x4d, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xbf, 0xb7,
+ 0x7d, 0x3f, 0x52, 0x77, 0xb0, 0xda, 0x6a, 0xf3, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x7b, 0xb7, 0x48,
+ 0xe0, 0x91, 0x86, 0x99, 0x82, 0x42, 0xdd, 0xbd, 0x57, 0x75, 0x20, 0x36, 0x71, 0x29, 0x07, 0xe1,
+ 0x87, 0x6c, 0xef, 0x8a, 0x27, 0x76, 0xec, 0xf1, 0x51, 0xe5, 0xb1, 0x2b, 0x24, 0x1c, 0x7d, 0x02,
+ 0x4e, 0xa8, 0x70, 0x8f, 0x62, 0x65, 0xca, 0x16, 0x07, 0x0c, 0x89, 0xd8, 0x89, 0x85, 0x6c, 0x34,
+ 0x9c, 0x57, 0x1f, 0xbd, 0x0e, 0xe3, 0xe2, 0x19, 0x26, 0x29, 0x0e, 0x9a, 0xe6, 0x8b, 0x57, 0x0d,
+ 0x28, 0x4e, 0x60, 0xcb, 0x24, 0x1a, 0xec, 0x7d, 0x22, 0x29, 0x0c, 0xa5, 0x93, 0x68, 0xe8, 0x70,
+ 0x9c, 0xaa, 0x81, 0xe6, 0x60, 0x82, 0xb3, 0x9d, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0xf8, 0x93, 0xaa,
+ 0x0d, 0x79, 0xdd, 0x04, 0xe3, 0x24, 0x3e, 0xba, 0x04, 0xa3, 0x4e, 0x50, 0xdb, 0x74, 0x23, 0x52,
+ 0xa3, 0xbb, 0x8a, 0x59, 0x10, 0x6a, 0xf6, 0x9f, 0x73, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0d, 0xe8,
+ 0x0b, 0xef, 0x38, 0x2d, 0x71, 0xfa, 0xe4, 0x1f, 0xe5, 0x6a, 0x05, 0x73, 0xd3, 0x2f, 0xfa, 0x1f,
+ 0xb3, 0x9a, 0xf6, 0x3d, 0x38, 0x92, 0x11, 0x16, 0x87, 0x2e, 0x3d, 0xa7, 0xe5, 0xca, 0x51, 0x49,
+ 0xb8, 0x69, 0xcc, 0x55, 0xca, 0x72, 0x3c, 0x34, 0x2c, 0xba, 0xbe, 0x59, 0xf8, 0x1c, 0x2d, 0xdd,
+ 0xb8, 0x5a, 0xdf, 0xcb, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x5f, 0x0b, 0x30, 0x91, 0xa1, 0x1e, 0x64,
+ 0x29, 0xaf, 0x13, 0xef, 0xbc, 0x38, 0xc3, 0xb5, 0x99, 0xd5, 0xa5, 0x70, 0x80, 0xac, 0x2e, 0xc5,
+ 0x6e, 0x59, 0x5d, 0xfa, 0xde, 0x4b, 0x56, 0x17, 0x73, 0xc4, 0xfa, 0x7b, 0x1a, 0xb1, 0x8c, 0x4c,
+ 0x30, 0x03, 0x07, 0xcc, 0x04, 0x63, 0x0c, 0xfa, 0x60, 0x0f, 0x83, 0xfe, 0xd3, 0x05, 0x98, 0x4c,
+ 0x6a, 0x16, 0x0f, 0x41, 0x3a, 0xff, 0xa6, 0x21, 0x9d, 0x3f, 0xdf, 0x4b, 0x04, 0x81, 0x5c, 0x49,
+ 0x3d, 0x4e, 0x48, 0xea, 0x9f, 0xee, 0x89, 0x5a, 0x67, 0xa9, 0xfd, 0x2f, 0x15, 0xe0, 0x58, 0xa6,
+ 0xc2, 0xf5, 0x10, 0xc6, 0xe6, 0xba, 0x31, 0x36, 0xcf, 0xf5, 0x1c, 0x5d, 0x21, 0x77, 0x80, 0x6e,
+ 0x25, 0x06, 0xe8, 0x42, 0xef, 0x24, 0x3b, 0x8f, 0xd2, 0x37, 0x8b, 0x70, 0x26, 0xb3, 0x5e, 0x2c,
+ 0xdc, 0x5e, 0x36, 0x84, 0xdb, 0xcf, 0x27, 0x84, 0xdb, 0x76, 0xe7, 0xda, 0x0f, 0x46, 0xda, 0x2d,
+ 0xa2, 0x0c, 0xb0, 0x58, 0x29, 0xf7, 0x29, 0xe9, 0x36, 0xa2, 0x0c, 0x28, 0x42, 0xd8, 0xa4, 0xfb,
+ 0xbd, 0x24, 0xe1, 0xfe, 0x1f, 0x2d, 0x38, 0x99, 0x39, 0x37, 0x87, 0x20, 0x67, 0x5c, 0x35, 0xe5,
+ 0x8c, 0x4f, 0xf5, 0xbc, 0x5a, 0x73, 0x04, 0x8f, 0x5f, 0x1c, 0xc8, 0xf9, 0x16, 0x26, 0xfe, 0xb8,
+ 0x0e, 0x23, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xc5, 0xaf, 0xab, 0x04, 0x10, 0xcf, 0xb1, 0xc7, 0x69,
+ 0x5c, 0xbc, 0xbf, 0x5b, 0x9a, 0x49, 0x92, 0x88, 0xc1, 0x58, 0xa7, 0x80, 0x3e, 0x05, 0x43, 0xa1,
+ 0xcc, 0xdd, 0xd9, 0x77, 0xff, 0xb9, 0x3b, 0x19, 0x27, 0xa9, 0xc4, 0x3b, 0x8a, 0x24, 0xfa, 0x7e,
+ 0x3d, 0x6a, 0x55, 0x07, 0xc1, 0x26, 0xef, 0xe4, 0x7d, 0xc4, 0xae, 0x7a, 0x1e, 0x60, 0x5b, 0xbd,
+ 0xa3, 0x92, 0xa2, 0x1b, 0xed, 0x85, 0xa5, 0x61, 0xa1, 0x37, 0x60, 0x32, 0xe4, 0x01, 0x5b, 0x63,
+ 0x13, 0x19, 0xbe, 0x16, 0x59, 0xcc, 0xbb, 0x6a, 0x02, 0x86, 0x53, 0xd8, 0x68, 0x59, 0xb6, 0xca,
+ 0x8c, 0xa1, 0xf8, 0xf2, 0x3c, 0x17, 0xb7, 0x28, 0x0c, 0xa2, 0x8e, 0x26, 0x27, 0x81, 0x0d, 0xbf,
+ 0x56, 0x13, 0x7d, 0x0a, 0x80, 0x2e, 0x22, 0x21, 0xc2, 0x19, 0xcc, 0x3f, 0x42, 0xe9, 0xd9, 0x52,
+ 0xcf, 0xf4, 0xc0, 0x60, 0xe1, 0x01, 0x16, 0x15, 0x11, 0xac, 0x11, 0x44, 0x0e, 0x8c, 0xc5, 0xff,
+ 0xe2, 0xac, 0xf4, 0xe7, 0x73, 0x5b, 0x48, 0x12, 0x67, 0xea, 0x8d, 0x45, 0x9d, 0x04, 0x36, 0x29,
+ 0xa2, 0x4f, 0xc2, 0xc9, 0xed, 0x5c, 0xbb, 0x23, 0xce, 0x4b, 0xb2, 0x34, 0xf3, 0xf9, 0xd6, 0x46,
+ 0xf9, 0xf5, 0xed, 0xff, 0x09, 0xe0, 0x91, 0x0e, 0x27, 0x3d, 0x9a, 0x33, 0x6d, 0x06, 0x9e, 0x49,
+ 0xca, 0x55, 0x66, 0x32, 0x2b, 0x1b, 0x82, 0x96, 0xc4, 0x86, 0x2a, 0xbc, 0xe7, 0x0d, 0xf5, 0x13,
+ 0x96, 0xf6, 0xcc, 0xe2, 0x16, 0xe5, 0x1f, 0x3b, 0xe0, 0x0d, 0xf6, 0x00, 0x45, 0x60, 0xeb, 0x19,
+ 0x72, 0xa4, 0xe7, 0x7b, 0xee, 0x4e, 0xef, 0x82, 0xa5, 0xdf, 0xc9, 0x0e, 0x71, 0xcf, 0x45, 0x4c,
+ 0x97, 0x0f, 0xfa, 0xfd, 0x87, 0x15, 0xee, 0xfe, 0x1b, 0x16, 0x9c, 0x4c, 0x15, 0xf3, 0x3e, 0x90,
+ 0x50, 0x44, 0xe9, 0x5b, 0x7d, 0xcf, 0x9d, 0x97, 0x04, 0xf9, 0x37, 0x5c, 0x11, 0xdf, 0x70, 0x32,
+ 0x17, 0x2f, 0xd9, 0xf5, 0x2f, 0xfd, 0x7d, 0xe9, 0x08, 0x6b, 0xc0, 0x44, 0xc4, 0xf9, 0x5d, 0x47,
+ 0x2d, 0x38, 0x5b, 0x6b, 0x07, 0x41, 0xbc, 0x58, 0x33, 0x36, 0x27, 0x7f, 0x2d, 0x3e, 0xbe, 0xb7,
+ 0x5b, 0x3a, 0xbb, 0xd0, 0x05, 0x17, 0x77, 0xa5, 0x86, 0x3c, 0x40, 0xcd, 0x94, 0x75, 0x1f, 0x3b,
+ 0x00, 0x72, 0xa4, 0x40, 0x69, 0x5b, 0x40, 0x6e, 0xa7, 0x9b, 0x61, 0x23, 0x98, 0x41, 0xf9, 0x70,
+ 0x65, 0x37, 0xdf, 0x99, 0x78, 0xfa, 0x33, 0xd7, 0xe0, 0x4c, 0xe7, 0xc5, 0x74, 0xa0, 0x10, 0x14,
+ 0x7f, 0x63, 0xc1, 0xe9, 0x8e, 0x71, 0xce, 0xbe, 0x0b, 0x1f, 0x0b, 0xf6, 0xe7, 0x2d, 0x78, 0x34,
+ 0xb3, 0x46, 0xd2, 0x79, 0xb0, 0x46, 0x0b, 0x35, 0x63, 0xd8, 0x38, 0xe2, 0x8f, 0x04, 0xe0, 0x18,
+ 0xc7, 0xb0, 0x17, 0x2d, 0x74, 0xb5, 0x17, 0xfd, 0x53, 0x0b, 0x52, 0x57, 0xfd, 0x21, 0x70, 0x9e,
+ 0x65, 0x93, 0xf3, 0x7c, 0xbc, 0x97, 0xd1, 0xcc, 0x61, 0x3a, 0xff, 0x79, 0x02, 0x8e, 0xe7, 0x78,
+ 0x90, 0x6f, 0xc3, 0xd4, 0x46, 0x8d, 0x98, 0x21, 0x43, 0x3a, 0x85, 0xd2, 0xeb, 0x18, 0x5f, 0x64,
+ 0xfe, 0xd8, 0xde, 0x6e, 0x69, 0x2a, 0x85, 0x82, 0xd3, 0x4d, 0xa0, 0xcf, 0x5b, 0x70, 0xd4, 0xb9,
+ 0x13, 0x2e, 0xd1, 0x17, 0x84, 0x5b, 0x9b, 0x6f, 0xf8, 0xb5, 0x2d, 0xca, 0x98, 0xc9, 0x6d, 0xf5,
+ 0x62, 0xa6, 0x28, 0xfc, 0x56, 0x35, 0x85, 0x6f, 0x34, 0x3f, 0xbd, 0xb7, 0x5b, 0x3a, 0x9a, 0x85,
+ 0x85, 0x33, 0xdb, 0x42, 0x58, 0xe4, 0x38, 0x73, 0xa2, 0xcd, 0x4e, 0x41, 0x6d, 0xb2, 0x5c, 0xfd,
+ 0x39, 0x4b, 0x2c, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x81, 0xe1, 0x0d, 0x19, 0xbf, 0x22, 0x83, 0xe5,
+ 0x8e, 0x07, 0xb2, 0x73, 0x54, 0x0f, 0x6e, 0x80, 0xa3, 0x90, 0x70, 0x4c, 0x14, 0xbd, 0x0e, 0x45,
+ 0x6f, 0x3d, 0x14, 0xa1, 0xf5, 0xb2, 0xed, 0x80, 0x4d, 0x4b, 0x6b, 0x1e, 0x3a, 0x6a, 0x75, 0xb9,
+ 0x8a, 0x69, 0x45, 0x74, 0x05, 0x8a, 0xc1, 0xed, 0xba, 0xd0, 0xe3, 0x64, 0x6e, 0x52, 0x3c, 0xbf,
+ 0x98, 0xd3, 0x2b, 0x46, 0x09, 0xcf, 0x2f, 0x62, 0x4a, 0x02, 0x55, 0xa0, 0x9f, 0xb9, 0x5d, 0x0b,
+ 0xd6, 0x36, 0xf3, 0x29, 0xdf, 0x21, 0x7c, 0x01, 0xf7, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x6b,
+ 0x30, 0x50, 0x73, 0xbd, 0x3a, 0x09, 0x04, 0x2f, 0xfb, 0xe1, 0x4c, 0x8d, 0x0d, 0xc3, 0xc8, 0xa1,
+ 0xc9, 0x15, 0x18, 0x0c, 0x03, 0x0b, 0x5a, 0x8c, 0x2a, 0x69, 0x6d, 0xae, 0xcb, 0x1b, 0x2b, 0x9b,
+ 0x2a, 0x69, 0x6d, 0x2e, 0x57, 0x3b, 0x52, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xbd,
+ 0x26, 0x5c, 0xaa, 0x33, 0xc5, 0x9b, 0x66, 0xf4, 0xaf, 0xf9, 0x81, 0xbd, 0xdd, 0x52, 0x61, 0x79,
+ 0x01, 0x17, 0xd6, 0x6b, 0x68, 0x15, 0x06, 0xd7, 0x79, 0xbc, 0x20, 0x21, 0x1f, 0x7d, 0x32, 0x3b,
+ 0x94, 0x51, 0x2a, 0xa4, 0x10, 0xf7, 0x6d, 0x15, 0x00, 0x2c, 0x89, 0xb0, 0x94, 0x5b, 0x2a, 0xee,
+ 0x91, 0x08, 0xbb, 0x3a, 0x7b, 0xb0, 0x58, 0x55, 0xfc, 0xa9, 0x11, 0x47, 0x4f, 0xc2, 0x1a, 0x45,
+ 0xba, 0xaa, 0x9d, 0x7b, 0xed, 0x80, 0xe5, 0xe4, 0x10, 0x8a, 0x99, 0xcc, 0x55, 0x3d, 0x27, 0x91,
+ 0x3a, 0xad, 0x6a, 0x85, 0x84, 0x63, 0xa2, 0x68, 0x0b, 0xc6, 0xb6, 0xc3, 0xd6, 0x26, 0x91, 0x5b,
+ 0x9a, 0x85, 0xeb, 0xcb, 0xe1, 0x66, 0x6f, 0x0a, 0x44, 0x37, 0x88, 0xda, 0x4e, 0x23, 0x75, 0x0a,
+ 0xb1, 0x67, 0xcd, 0x4d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xb7, 0xed, 0xdf, 0xde, 0x89,
+ 0x88, 0x88, 0x96, 0x9a, 0x39, 0xfc, 0x6f, 0x71, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0,
+ 0x4d, 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xcc, 0x0f, 0xc5, 0x3e, 0x27, 0x91, 0x72, 0x06, 0x85, 0x9d,
+ 0x96, 0x31, 0x29, 0x76, 0x4a, 0xb6, 0x36, 0xfd, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x54, 0xfe, 0x29,
+ 0x59, 0xc9, 0xc0, 0x4f, 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x54, 0x87, 0xf1, 0x96, 0x1f,
+ 0x44, 0x77, 0xfc, 0x40, 0xae, 0x2f, 0xd4, 0x41, 0x50, 0x6a, 0x60, 0x8a, 0x16, 0x99, 0x59, 0x90,
+ 0x09, 0xc1, 0x09, 0x9a, 0xe8, 0xe3, 0x30, 0x18, 0xd6, 0x9c, 0x06, 0x29, 0x5f, 0x9f, 0x3e, 0x92,
+ 0x7f, 0xfd, 0x54, 0x39, 0x4a, 0xce, 0xea, 0xe2, 0xe1, 0x9e, 0x38, 0x0a, 0x96, 0xe4, 0xd0, 0x32,
+ 0xf4, 0xb3, 0x54, 0xd6, 0x2c, 0xb4, 0x6f, 0x4e, 0x44, 0xf9, 0x94, 0x53, 0x0f, 0x3f, 0x9b, 0x58,
+ 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x92, 0x02, 0x3f, 0x9c, 0x3e, 0x96, 0xbf, 0x07, 0x84, 0x80,
+ 0xe1, 0x7a, 0xb5, 0xd3, 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x1e, 0xef,
+ 0x60, 0xb0, 0x99, 0x7b, 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0xc7, 0x43, 0x69,
+ 0x9e, 0x85, 0x49, 0x98, 0xfe, 0x63, 0x2b, 0x65, 0xb1, 0xf1, 0x91, 0x5e, 0x05, 0xde, 0x0f, 0xf0,
+ 0xe1, 0xfa, 0x79, 0x0b, 0x8e, 0xb7, 0x32, 0x3f, 0x44, 0x30, 0x00, 0xbd, 0xc9, 0xcd, 0xf9, 0xa7,
+ 0xab, 0x30, 0xd0, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0x52, 0x38, 0x50, 0x7c, 0xcf, 0xc2, 0x81, 0x15,
+ 0x18, 0xaa, 0xf1, 0x97, 0x9c, 0x4c, 0x5f, 0xd0, 0x53, 0x10, 0x53, 0xae, 0xa7, 0x15, 0x15, 0xb1,
+ 0x22, 0x81, 0x7e, 0xd2, 0x82, 0xd3, 0xc9, 0xae, 0x63, 0xc2, 0xc0, 0xc2, 0x5c, 0x93, 0x8b, 0xb5,
+ 0x96, 0xc5, 0xf7, 0xa7, 0xf8, 0x7f, 0x03, 0x79, 0xbf, 0x1b, 0x02, 0xee, 0xdc, 0x18, 0x5a, 0xcc,
+ 0x90, 0xab, 0x0d, 0x98, 0x3a, 0xc9, 0x1e, 0x64, 0x6b, 0x2f, 0xc2, 0x68, 0xd3, 0x6f, 0x7b, 0x91,
+ 0xb0, 0xba, 0x14, 0xa6, 0x5b, 0xcc, 0x64, 0x69, 0x45, 0x2b, 0xc7, 0x06, 0x56, 0x42, 0x22, 0x37,
+ 0x74, 0xdf, 0x12, 0xb9, 0x77, 0x60, 0xd4, 0xd3, 0x1c, 0x12, 0x3a, 0xbd, 0x60, 0x85, 0x74, 0x51,
+ 0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 0xb3, 0xb4, 0x0c, 0xde, 0x9b, 0xb4, 0xec, 0x50,
+ 0x9f, 0xc4, 0xf6, 0x6f, 0x16, 0x32, 0x5e, 0x0c, 0x5c, 0x2a, 0xf7, 0x9a, 0x29, 0x95, 0x3b, 0x97,
+ 0x94, 0xca, 0xa5, 0x54, 0x55, 0x86, 0x40, 0xae, 0xf7, 0x1c, 0x9a, 0x3d, 0x07, 0xa6, 0xfe, 0x61,
+ 0x0b, 0x4e, 0x30, 0xdd, 0x07, 0x6d, 0xe0, 0x3d, 0xeb, 0x3b, 0x98, 0x41, 0xec, 0xb5, 0x6c, 0x72,
+ 0x38, 0xaf, 0x1d, 0xbb, 0x01, 0x67, 0xbb, 0xdd, 0xbb, 0xcc, 0xbe, 0xb8, 0xae, 0xcc, 0x2b, 0x62,
+ 0xfb, 0xe2, 0x7a, 0x79, 0x11, 0x33, 0x48, 0xaf, 0x61, 0x17, 0xed, 0xff, 0xdb, 0x82, 0x62, 0xc5,
+ 0xaf, 0x1f, 0xc2, 0x8b, 0xfe, 0x63, 0xc6, 0x8b, 0xfe, 0x91, 0xec, 0x1b, 0xbf, 0x9e, 0xab, 0xec,
+ 0x5b, 0x4a, 0x28, 0xfb, 0x4e, 0xe7, 0x11, 0xe8, 0xac, 0xda, 0xfb, 0xe5, 0x22, 0x8c, 0x54, 0xfc,
+ 0xba, 0xda, 0x67, 0xff, 0xfd, 0xfd, 0xb8, 0x11, 0xe5, 0x66, 0xcd, 0xd2, 0x28, 0x33, 0x7b, 0x62,
+ 0x19, 0xf5, 0xe2, 0xbb, 0xcc, 0x9b, 0xe8, 0x16, 0x71, 0x37, 0x36, 0x23, 0x52, 0x4f, 0x7e, 0xce,
+ 0xe1, 0x79, 0x13, 0x7d, 0xab, 0x08, 0x13, 0x89, 0xd6, 0x51, 0x03, 0xc6, 0x1a, 0xba, 0x2a, 0x49,
+ 0xac, 0xd3, 0xfb, 0xd2, 0x42, 0x09, 0x6f, 0x0c, 0xad, 0x08, 0x9b, 0xc4, 0xd1, 0x2c, 0x80, 0xa7,
+ 0xdb, 0xa4, 0xab, 0x00, 0xcb, 0x9a, 0x3d, 0xba, 0x86, 0x81, 0x5e, 0x82, 0x91, 0xc8, 0x6f, 0xf9,
+ 0x0d, 0x7f, 0x63, 0xe7, 0x2a, 0x91, 0x11, 0x39, 0x95, 0xc9, 0xf2, 0x5a, 0x0c, 0xc2, 0x3a, 0x1e,
+ 0xba, 0x0b, 0x53, 0x8a, 0x48, 0xf5, 0x01, 0xa8, 0xd7, 0x98, 0xd8, 0x64, 0x35, 0x49, 0x11, 0xa7,
+ 0x1b, 0x41, 0xaf, 0xc0, 0x38, 0xb3, 0x9d, 0x66, 0xf5, 0xaf, 0x92, 0x1d, 0x19, 0xa9, 0x99, 0x71,
+ 0xd8, 0x2b, 0x06, 0x04, 0x27, 0x30, 0xd1, 0x02, 0x4c, 0x35, 0xdd, 0x30, 0x51, 0x7d, 0x80, 0x55,
+ 0x67, 0x1d, 0x58, 0x49, 0x02, 0x71, 0x1a, 0xdf, 0xfe, 0x75, 0x31, 0xc7, 0x5e, 0xe4, 0x7e, 0xb0,
+ 0x1d, 0xdf, 0xdf, 0xdb, 0xf1, 0x9b, 0x16, 0x4c, 0xd2, 0xd6, 0x99, 0x41, 0xa8, 0x64, 0xa4, 0x54,
+ 0x2e, 0x0f, 0xab, 0x43, 0x2e, 0x8f, 0x73, 0xf4, 0xd8, 0xae, 0xfb, 0xed, 0x48, 0x48, 0x47, 0xb5,
+ 0x73, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0x84, 0xd7, 0xbd, 0x8e, 0x47, 0x82, 0x00,
+ 0x0b, 0xa8, 0x4c, 0xf5, 0xd1, 0x97, 0x9d, 0xea, 0x83, 0x47, 0x6c, 0x17, 0x76, 0x74, 0x82, 0xa5,
+ 0xd5, 0x22, 0xb6, 0x4b, 0x03, 0xbb, 0x18, 0xc7, 0xfe, 0x76, 0x11, 0x46, 0x2b, 0x7e, 0x3d, 0x36,
+ 0xec, 0x78, 0xd1, 0x30, 0xec, 0x38, 0x9b, 0x30, 0xec, 0x98, 0xd4, 0x71, 0x35, 0x33, 0x8e, 0x37,
+ 0x01, 0xf9, 0x22, 0x90, 0xfc, 0x65, 0xe2, 0x31, 0xbb, 0x37, 0x61, 0xa8, 0x57, 0x8c, 0xcd, 0x1e,
+ 0xae, 0xa7, 0x30, 0x70, 0x46, 0xad, 0x0f, 0x4c, 0x42, 0x0e, 0xd7, 0x24, 0xe4, 0x4f, 0x2c, 0xb6,
+ 0x02, 0x16, 0x57, 0xab, 0xdc, 0x56, 0x19, 0x5d, 0x84, 0x11, 0x76, 0x5a, 0xb2, 0x90, 0x11, 0xd2,
+ 0x72, 0x82, 0xa5, 0xf1, 0x5c, 0x8d, 0x8b, 0xb1, 0x8e, 0x83, 0xce, 0xc3, 0x50, 0x48, 0x9c, 0xa0,
+ 0xb6, 0xa9, 0xae, 0x0a, 0x61, 0xe6, 0xc0, 0xcb, 0xb0, 0x82, 0xa2, 0xb7, 0xe2, 0xc0, 0xe3, 0xc5,
+ 0x7c, 0xc3, 0x67, 0xbd, 0x3f, 0x7c, 0xbb, 0xe5, 0x47, 0x1b, 0xb7, 0x6f, 0x01, 0x4a, 0xe3, 0xf7,
+ 0xe0, 0x49, 0x56, 0x32, 0x43, 0xe3, 0x0e, 0xa7, 0xc2, 0xe2, 0xfe, 0x9b, 0x05, 0xe3, 0x15, 0xbf,
+ 0x4e, 0x8f, 0x81, 0xef, 0xa5, 0x3d, 0xaf, 0x67, 0x5d, 0x18, 0xe8, 0x90, 0x75, 0xe1, 0x31, 0xe8,
+ 0xaf, 0xf8, 0xf5, 0x2e, 0xe1, 0x7b, 0x7f, 0xc5, 0x82, 0xc1, 0x8a, 0x5f, 0x3f, 0x04, 0x25, 0xce,
+ 0x6b, 0xa6, 0x12, 0xe7, 0x44, 0xce, 0xba, 0xc9, 0xd1, 0xdb, 0xfc, 0x79, 0x1f, 0x8c, 0xd1, 0x7e,
+ 0xfa, 0x1b, 0x72, 0x2a, 0x8d, 0x61, 0xb3, 0x7a, 0x18, 0x36, 0xfa, 0xa4, 0xf0, 0x1b, 0x0d, 0xff,
+ 0x4e, 0x72, 0x5a, 0x97, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x2c, 0x0c, 0xb5, 0x02, 0xb2, 0xed, 0xfa,
+ 0x82, 0x57, 0xd7, 0x54, 0x62, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x7d, 0xc4, 0x87, 0xae, 0x47, 0xf9,
+ 0x92, 0x9a, 0xef, 0xd5, 0xb9, 0x9e, 0xa3, 0x28, 0x52, 0x83, 0x69, 0xe5, 0xd8, 0xc0, 0x42, 0xb7,
+ 0x60, 0x98, 0xfd, 0x67, 0xc7, 0x4e, 0xff, 0x81, 0x8f, 0x1d, 0x91, 0x2c, 0x59, 0x10, 0xc0, 0x31,
+ 0x2d, 0xf4, 0x3c, 0x40, 0x24, 0xd3, 0xeb, 0x84, 0x22, 0x8c, 0xab, 0x7a, 0xd7, 0xa8, 0xc4, 0x3b,
+ 0x21, 0xd6, 0xb0, 0xd0, 0x33, 0x30, 0x1c, 0x39, 0x6e, 0xe3, 0x9a, 0xeb, 0x31, 0x5b, 0x00, 0xda,
+ 0x7f, 0x91, 0xb3, 0x58, 0x14, 0xe2, 0x18, 0x4e, 0xf9, 0x4a, 0x16, 0xdd, 0x6a, 0x7e, 0x27, 0x12,
+ 0xe9, 0xf9, 0x8a, 0x9c, 0xaf, 0xbc, 0xa6, 0x4a, 0xb1, 0x86, 0x81, 0x36, 0xe1, 0x94, 0xeb, 0xb1,
+ 0x34, 0x5a, 0xa4, 0xba, 0xe5, 0xb6, 0xd6, 0xae, 0x55, 0x6f, 0x92, 0xc0, 0x5d, 0xdf, 0x99, 0x77,
+ 0x6a, 0x5b, 0xc4, 0xab, 0x33, 0xb1, 0xc3, 0xd0, 0xfc, 0xe3, 0xa2, 0x8b, 0xa7, 0xca, 0x1d, 0x70,
+ 0x71, 0x47, 0x4a, 0xc8, 0xa6, 0xdb, 0x31, 0x20, 0x4e, 0x53, 0xc8, 0x17, 0x78, 0x0a, 0x1e, 0x56,
+ 0x82, 0x05, 0xc4, 0x7e, 0x81, 0xed, 0x89, 0xeb, 0x55, 0xf4, 0xb4, 0x71, 0xbc, 0x1c, 0xd7, 0x8f,
+ 0x97, 0xfd, 0xdd, 0xd2, 0xc0, 0xf5, 0xaa, 0x16, 0xe9, 0xe8, 0x12, 0x1c, 0xab, 0xf8, 0xf5, 0x8a,
+ 0x1f, 0x44, 0xcb, 0x7e, 0x70, 0xc7, 0x09, 0xea, 0x72, 0x09, 0x96, 0x64, 0xac, 0x27, 0x7a, 0xc6,
+ 0xf6, 0xf3, 0x13, 0xc8, 0x88, 0xe3, 0xf4, 0x02, 0xe3, 0x10, 0x0f, 0xe8, 0x5a, 0x5b, 0x63, 0xbc,
+ 0x8a, 0x4a, 0x56, 0x77, 0xd9, 0x89, 0x08, 0xba, 0x0e, 0x63, 0x35, 0xfd, 0xda, 0x16, 0xd5, 0x9f,
+ 0x92, 0x97, 0x9d, 0x71, 0xa7, 0x67, 0xde, 0xf3, 0x66, 0x7d, 0xfb, 0x1b, 0x96, 0x68, 0x85, 0x4b,
+ 0x3e, 0xb8, 0x0d, 0x6d, 0xf7, 0x33, 0x77, 0x01, 0xa6, 0x02, 0xbd, 0x8a, 0x66, 0x8b, 0x76, 0x8c,
+ 0x67, 0xff, 0x49, 0x00, 0x71, 0x1a, 0x1f, 0x7d, 0x12, 0x4e, 0x1a, 0x85, 0x52, 0x2d, 0xaf, 0xe5,
+ 0xe0, 0x66, 0xb2, 0x21, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x08, 0xc7, 0x93, 0xdf, 0x25,
+ 0xa4, 0x35, 0xf7, 0xf9, 0x75, 0x85, 0x83, 0x7d, 0x9d, 0xfd, 0x12, 0x4c, 0xd1, 0x67, 0xbc, 0x62,
+ 0x49, 0xd9, 0xfc, 0x75, 0x0f, 0xa7, 0xf5, 0xdb, 0x43, 0xec, 0x1a, 0x4c, 0x64, 0xa0, 0x43, 0x9f,
+ 0x86, 0xf1, 0x90, 0xb0, 0x18, 0x72, 0x52, 0x4a, 0xd8, 0xc1, 0x2f, 0xbe, 0xba, 0xa4, 0x63, 0xf2,
+ 0x97, 0x90, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x13, 0xc6, 0xef, 0xb8, 0x5e, 0xdd, 0xbf, 0x13, 0x4a,
+ 0xfa, 0x43, 0xf9, 0x2a, 0x87, 0x5b, 0x1c, 0x33, 0xd1, 0x47, 0xa3, 0xb9, 0x5b, 0x06, 0x31, 0x9c,
+ 0x20, 0x4e, 0x8f, 0x9a, 0xa0, 0xed, 0xcd, 0x85, 0x37, 0x42, 0x12, 0x88, 0x08, 0x77, 0xec, 0xa8,
+ 0xc1, 0xb2, 0x10, 0xc7, 0x70, 0x7a, 0xd4, 0xb0, 0x3f, 0xcc, 0xb1, 0x9e, 0x9d, 0x65, 0xe2, 0xa8,
+ 0xc1, 0xaa, 0x14, 0x6b, 0x18, 0xf4, 0x28, 0x66, 0xff, 0x56, 0x7d, 0x0f, 0xfb, 0x7e, 0x24, 0x0f,
+ 0x6f, 0x96, 0xae, 0x53, 0x2b, 0xc7, 0x06, 0x56, 0x4e, 0x3c, 0xbd, 0xbe, 0x83, 0xc6, 0xd3, 0x43,
+ 0x51, 0x87, 0x58, 0x02, 0x3c, 0x22, 0xf4, 0xa5, 0x4e, 0xb1, 0x04, 0xf6, 0xef, 0x2b, 0xce, 0x00,
+ 0xe5, 0x05, 0xd6, 0xc5, 0x00, 0xf5, 0xf3, 0x80, 0x81, 0x4c, 0x29, 0x5a, 0xe5, 0xa3, 0x23, 0x61,
+ 0x68, 0x09, 0x06, 0xc3, 0x9d, 0xb0, 0x16, 0x35, 0xc2, 0x4e, 0x29, 0x59, 0xab, 0x0c, 0x45, 0xcb,
+ 0x08, 0xce, 0xab, 0x60, 0x59, 0x17, 0xd5, 0xe0, 0x88, 0xa0, 0xb8, 0xb0, 0xe9, 0x78, 0x2a, 0x51,
+ 0x24, 0xb7, 0x7e, 0xbc, 0xb8, 0xb7, 0x5b, 0x3a, 0x22, 0x5a, 0xd6, 0xc1, 0xfb, 0xbb, 0x25, 0xba,
+ 0x25, 0x33, 0x20, 0x38, 0x8b, 0x1a, 0x5f, 0xf2, 0xb5, 0x9a, 0xdf, 0x6c, 0x55, 0x02, 0x7f, 0xdd,
+ 0x6d, 0x90, 0x4e, 0x8a, 0xe5, 0xaa, 0x81, 0x29, 0x96, 0xbc, 0x51, 0x86, 0x13, 0xd4, 0xd0, 0x6d,
+ 0x98, 0x70, 0x5a, 0xad, 0xb9, 0xa0, 0xe9, 0x07, 0xb2, 0x81, 0x91, 0x7c, 0x0d, 0xc5, 0x9c, 0x89,
+ 0xca, 0xf3, 0x44, 0x26, 0x0a, 0x71, 0x92, 0x20, 0x1d, 0x28, 0xb1, 0xd1, 0x8c, 0x81, 0x1a, 0x8b,
+ 0x07, 0x4a, 0xec, 0xcb, 0x8c, 0x81, 0xca, 0x80, 0xe0, 0x2c, 0x6a, 0xf6, 0x0f, 0x30, 0xc6, 0x9f,
+ 0xc5, 0x9b, 0x66, 0x6e, 0x46, 0x4d, 0x18, 0x6b, 0xb1, 0x63, 0x5f, 0xe4, 0x70, 0x13, 0x47, 0xc5,
+ 0x8b, 0x3d, 0x0a, 0x42, 0xef, 0xb0, 0x2c, 0xb4, 0x86, 0x41, 0x6c, 0x45, 0x27, 0x87, 0x4d, 0xea,
+ 0xf6, 0x2f, 0xcd, 0x30, 0xd6, 0xb1, 0xca, 0xa5, 0x9b, 0x83, 0xc2, 0xe9, 0x52, 0xc8, 0x33, 0x66,
+ 0xf2, 0xf5, 0x08, 0xf1, 0xfa, 0x12, 0x8e, 0x9b, 0x58, 0xd6, 0x45, 0x9f, 0x82, 0x71, 0xd7, 0x73,
+ 0xe3, 0xec, 0xcd, 0xe1, 0xf4, 0xd1, 0xfc, 0x68, 0x5e, 0x0a, 0x4b, 0xcf, 0xef, 0xa8, 0x57, 0xc6,
+ 0x09, 0x62, 0xe8, 0x2d, 0x66, 0x23, 0x2a, 0x49, 0x17, 0x7a, 0x21, 0xad, 0x9b, 0x83, 0x4a, 0xb2,
+ 0x1a, 0x11, 0xd4, 0x86, 0x23, 0xe9, 0x2c, 0xd6, 0xe1, 0xb4, 0x9d, 0xff, 0x36, 0x4a, 0x27, 0xa2,
+ 0x8e, 0x13, 0xf1, 0xa5, 0x61, 0x21, 0xce, 0xa2, 0x8f, 0xae, 0x25, 0x73, 0x0c, 0x17, 0x0d, 0x0d,
+ 0x44, 0x2a, 0xcf, 0xf0, 0x58, 0xc7, 0xf4, 0xc2, 0x1b, 0x70, 0x5a, 0x4b, 0xd3, 0x7a, 0x39, 0x70,
+ 0x98, 0x8d, 0x92, 0xcb, 0x6e, 0x23, 0x8d, 0xa9, 0x7d, 0x74, 0x6f, 0xb7, 0x74, 0x7a, 0xad, 0x13,
+ 0x22, 0xee, 0x4c, 0x07, 0x5d, 0x87, 0x63, 0x3c, 0x16, 0xcd, 0x22, 0x71, 0xea, 0x0d, 0xd7, 0x53,
+ 0x5c, 0x33, 0x3f, 0xbb, 0x4e, 0xee, 0xed, 0x96, 0x8e, 0xcd, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0xe8,
+ 0x35, 0x18, 0xae, 0x7b, 0xf2, 0x94, 0x1d, 0x30, 0x32, 0xe1, 0x0e, 0x2f, 0xae, 0x56, 0xd5, 0xf7,
+ 0xc7, 0x7f, 0x70, 0x5c, 0x01, 0x6d, 0x70, 0x15, 0x98, 0x92, 0x5b, 0x0e, 0xa6, 0x42, 0x94, 0x26,
+ 0x45, 0xfb, 0x46, 0x70, 0x07, 0xae, 0xfb, 0x55, 0x0e, 0x80, 0x46, 0xdc, 0x07, 0x83, 0x30, 0x7a,
+ 0x13, 0x90, 0xc8, 0xb8, 0x34, 0x57, 0x63, 0x09, 0x02, 0x35, 0xbb, 0x54, 0x25, 0x42, 0xa8, 0xa6,
+ 0x30, 0x70, 0x46, 0x2d, 0x74, 0x85, 0x1e, 0x8f, 0x7a, 0xa9, 0x38, 0x7e, 0x55, 0xbe, 0xf5, 0x45,
+ 0xd2, 0x0a, 0x08, 0x33, 0xa5, 0x34, 0x29, 0xe2, 0x44, 0x3d, 0x54, 0x87, 0x53, 0x4e, 0x3b, 0xf2,
+ 0x99, 0x76, 0xd1, 0x44, 0x5d, 0xf3, 0xb7, 0x88, 0xc7, 0x14, 0xfb, 0x43, 0x2c, 0xf4, 0xe9, 0xa9,
+ 0xb9, 0x0e, 0x78, 0xb8, 0x23, 0x15, 0xfa, 0x9c, 0xa2, 0x63, 0xa1, 0x29, 0xfe, 0x0c, 0x3f, 0x75,
+ 0xae, 0x0d, 0x97, 0x18, 0xe8, 0x25, 0x18, 0xd9, 0xf4, 0xc3, 0x68, 0x95, 0x44, 0x77, 0xfc, 0x60,
+ 0x4b, 0xa4, 0x78, 0x88, 0xd3, 0xea, 0xc4, 0x20, 0xac, 0xe3, 0xa1, 0xa7, 0x60, 0x90, 0x99, 0x9d,
+ 0x95, 0x17, 0xd9, 0x5d, 0x3b, 0x14, 0x9f, 0x31, 0x57, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xae,
+ 0x2c, 0xb0, 0xe3, 0x38, 0x81, 0x5a, 0xae, 0x2c, 0x60, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x74, 0x02,
+ 0x52, 0x09, 0xfc, 0x1a, 0x09, 0xb5, 0x64, 0x4e, 0x8f, 0xf0, 0x04, 0x16, 0x74, 0xb9, 0x56, 0xb3,
+ 0x10, 0x70, 0x76, 0x3d, 0x44, 0xd2, 0x29, 0x8a, 0xc7, 0xf3, 0xd5, 0xae, 0x69, 0x76, 0xb0, 0xc7,
+ 0x2c, 0xc5, 0x1e, 0x4c, 0xaa, 0xe4, 0xc8, 0x3c, 0x65, 0x45, 0x38, 0x3d, 0xc1, 0xd6, 0x76, 0xef,
+ 0xf9, 0x2e, 0x94, 0x22, 0xbb, 0x9c, 0xa0, 0x84, 0x53, 0xb4, 0x8d, 0xd8, 0xba, 0x93, 0x5d, 0x63,
+ 0xeb, 0x5e, 0x80, 0xe1, 0xb0, 0x7d, 0xbb, 0xee, 0x37, 0x1d, 0xd7, 0x63, 0xd6, 0x3b, 0xda, 0xc3,
+ 0xbd, 0x2a, 0x01, 0x38, 0xc6, 0x41, 0xcb, 0x30, 0xe4, 0x48, 0x2d, 0x35, 0xca, 0x0f, 0x1b, 0xa8,
+ 0x74, 0xd3, 0x3c, 0x92, 0x96, 0xd4, 0x4b, 0xab, 0xba, 0xe8, 0x55, 0x18, 0x13, 0xa1, 0x49, 0x78,
+ 0x14, 0x1e, 0x66, 0x5d, 0xa3, 0x39, 0x53, 0x57, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x0d, 0x18, 0x89,
+ 0xfc, 0x86, 0x90, 0x71, 0x86, 0xd3, 0xc7, 0xf3, 0xa3, 0xfb, 0xae, 0x29, 0x34, 0x5d, 0x7f, 0xa2,
+ 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe3, 0xeb, 0x9d, 0xa5, 0x6e, 0x22, 0xa1, 0x48, 0x48, 0x7f, 0x3a,
+ 0xcf, 0xf4, 0x92, 0xa1, 0x99, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x19, 0xa6, 0x5a, 0x81,
+ 0xeb, 0xb3, 0x35, 0xa1, 0xb4, 0xee, 0xd3, 0x66, 0xa2, 0xd6, 0x4a, 0x12, 0x01, 0xa7, 0xeb, 0xb0,
+ 0xc8, 0x32, 0xa2, 0x70, 0xfa, 0x24, 0x4f, 0x36, 0xc7, 0xe5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a,
+ 0x61, 0x27, 0x31, 0x17, 0xe1, 0x4d, 0xcf, 0xe4, 0xc7, 0x2b, 0xd0, 0x45, 0x7d, 0x9c, 0xf7, 0x57,
+ 0x7f, 0x71, 0x4c, 0x01, 0xd5, 0xb5, 0x1c, 0xef, 0xf4, 0x05, 0x15, 0x4e, 0x9f, 0xea, 0x60, 0xfb,
+ 0x9b, 0x78, 0x2e, 0xc7, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x06, 0x4c, 0x8a, 0xb0,
+ 0x0b, 0xf1, 0x30, 0x9d, 0x8e, 0xfd, 0xa3, 0x70, 0x02, 0x86, 0x53, 0xd8, 0x3c, 0xd9, 0x9b, 0x73,
+ 0xbb, 0x41, 0xc4, 0xd1, 0x77, 0xcd, 0xf5, 0xb6, 0xc2, 0xe9, 0x33, 0xec, 0x7c, 0x10, 0xc9, 0xde,
+ 0x92, 0x50, 0x9c, 0x51, 0x03, 0xad, 0xc1, 0x64, 0x2b, 0x20, 0xa4, 0xc9, 0xde, 0x49, 0xe2, 0x3e,
+ 0x2b, 0xf1, 0xc0, 0x4a, 0xb4, 0x27, 0x95, 0x04, 0x6c, 0x3f, 0xa3, 0x0c, 0xa7, 0x28, 0xa0, 0x3b,
+ 0x30, 0xe4, 0x6f, 0x93, 0x60, 0x93, 0x38, 0xf5, 0xe9, 0xb3, 0x1d, 0xbc, 0xf6, 0xc4, 0xe5, 0x76,
+ 0x5d, 0xe0, 0x26, 0x8c, 0x9a, 0x64, 0x71, 0x77, 0xa3, 0x26, 0xd9, 0x18, 0xfa, 0x4f, 0x2c, 0x38,
+ 0x29, 0xd5, 0x84, 0xd5, 0x16, 0x1d, 0xf5, 0x05, 0xdf, 0x0b, 0xa3, 0x80, 0x87, 0x02, 0x7a, 0x34,
+ 0x3f, 0x3c, 0xce, 0x5a, 0x4e, 0x25, 0xa5, 0x45, 0x38, 0x99, 0x87, 0x11, 0xe2, 0xfc, 0x16, 0xe9,
+ 0xcb, 0x3e, 0x24, 0x91, 0x3c, 0x8c, 0xe6, 0xc2, 0xe5, 0xb7, 0x16, 0x57, 0xa7, 0x1f, 0xe3, 0x71,
+ 0x8c, 0xe8, 0x66, 0xa8, 0x26, 0x81, 0x38, 0x8d, 0x8f, 0x2e, 0x42, 0xc1, 0x0f, 0xa7, 0x1f, 0x67,
+ 0x6b, 0xfb, 0x64, 0xce, 0x38, 0x5e, 0xaf, 0x72, 0xe3, 0xd6, 0xeb, 0x55, 0x5c, 0xf0, 0x43, 0x99,
+ 0x70, 0x8d, 0x3e, 0x67, 0xc3, 0xe9, 0x27, 0xb8, 0xcc, 0x59, 0x26, 0x5c, 0x63, 0x85, 0x38, 0x86,
+ 0xa3, 0x4d, 0x98, 0x08, 0x0d, 0xb1, 0x41, 0x38, 0x7d, 0x8e, 0x8d, 0xd4, 0x13, 0x79, 0x93, 0x66,
+ 0x60, 0x6b, 0x99, 0x90, 0x4c, 0x2a, 0x38, 0x49, 0x96, 0xef, 0x2e, 0x4d, 0x70, 0x11, 0x4e, 0x3f,
+ 0xd9, 0x65, 0x77, 0x69, 0xc8, 0xfa, 0xee, 0xd2, 0x69, 0xe0, 0x04, 0x4d, 0x74, 0x43, 0x77, 0x89,
+ 0x3c, 0x9f, 0x6f, 0x28, 0x99, 0xe9, 0x0c, 0x39, 0x96, 0xe7, 0x08, 0x39, 0xf3, 0x7d, 0x30, 0x95,
+ 0xe2, 0xc2, 0x0e, 0xe2, 0x1f, 0x32, 0xb3, 0x05, 0x63, 0xc6, 0x4a, 0x7f, 0xa8, 0xe6, 0x43, 0x3f,
+ 0x03, 0x30, 0xac, 0xcc, 0x3a, 0x72, 0xf4, 0x6c, 0x53, 0xf7, 0xa5, 0x67, 0xbb, 0x60, 0x5a, 0x1f,
+ 0x9d, 0x4c, 0x5a, 0x1f, 0x0d, 0x55, 0xfc, 0xba, 0x61, 0x70, 0xb4, 0x96, 0x11, 0x41, 0x38, 0xef,
+ 0x8c, 0xee, 0xdd, 0x21, 0x4e, 0x53, 0x55, 0x15, 0x7b, 0x36, 0x63, 0xea, 0xeb, 0xa8, 0xfd, 0xba,
+ 0x0c, 0x53, 0x9e, 0xcf, 0x9e, 0x11, 0xa4, 0x2e, 0x79, 0x44, 0xc6, 0x0a, 0x0e, 0xeb, 0x11, 0xee,
+ 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x79, 0xb9, 0xa4, 0xba, 0x8d, 0xb3, 0x7a, 0x58, 0x40,
+ 0xe9, 0xf3, 0x95, 0xff, 0x0a, 0xa7, 0x27, 0xf3, 0x9f, 0xaf, 0xbc, 0x52, 0x92, 0x5f, 0x0c, 0x25,
+ 0xbf, 0xc8, 0xb4, 0x4b, 0x2d, 0xbf, 0x5e, 0xae, 0x88, 0x97, 0x88, 0x16, 0xdb, 0xbf, 0x5e, 0xae,
+ 0x60, 0x0e, 0x43, 0x73, 0x30, 0xc0, 0x7e, 0xc8, 0xc8, 0x41, 0x79, 0x27, 0x49, 0xb9, 0xa2, 0xe5,
+ 0xa4, 0x65, 0x15, 0xb0, 0xa8, 0xc8, 0xb4, 0x07, 0xf4, 0xf9, 0xc6, 0xb4, 0x07, 0x83, 0xf7, 0xa9,
+ 0x3d, 0x90, 0x04, 0x70, 0x4c, 0x0b, 0xdd, 0x85, 0x63, 0xc6, 0x93, 0x59, 0x79, 0x08, 0x42, 0xbe,
+ 0x91, 0x42, 0x02, 0x79, 0xfe, 0xb4, 0xe8, 0xf4, 0xb1, 0x72, 0x16, 0x25, 0x9c, 0xdd, 0x00, 0x6a,
+ 0xc0, 0x54, 0x2d, 0xd5, 0xea, 0x50, 0xef, 0xad, 0xaa, 0x75, 0x91, 0x6e, 0x31, 0x4d, 0x18, 0xbd,
+ 0x0a, 0x43, 0xef, 0xfa, 0xdc, 0xa0, 0x50, 0xbc, 0x9e, 0x64, 0x7c, 0x9b, 0xa1, 0xb7, 0xae, 0x57,
+ 0x59, 0xf9, 0xfe, 0x6e, 0x69, 0xa4, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x2a, 0xa0, 0x1f, 0xb3, 0x60,
+ 0x26, 0xfd, 0x26, 0x57, 0x9d, 0x1e, 0xeb, 0xbd, 0xd3, 0xb6, 0x68, 0x74, 0x66, 0x29, 0x97, 0x1c,
+ 0xee, 0xd0, 0x14, 0xfa, 0x28, 0xdd, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x84, 0xfe, 0x8f, 0xc6, 0xfb,
+ 0x89, 0x96, 0xee, 0xef, 0x96, 0x26, 0xf8, 0xe1, 0xed, 0xde, 0x53, 0x59, 0x08, 0x78, 0x05, 0xf4,
+ 0x83, 0x70, 0x2c, 0x48, 0xcb, 0xc8, 0x89, 0x7c, 0x27, 0x3c, 0xdd, 0xcb, 0x45, 0x90, 0x9c, 0x70,
+ 0x9c, 0x45, 0x10, 0x67, 0xb7, 0x63, 0xff, 0xa1, 0xc5, 0x74, 0x23, 0xa2, 0x5b, 0x24, 0x6c, 0x37,
+ 0xa2, 0x43, 0x30, 0xe2, 0x5b, 0x32, 0x6c, 0x13, 0xee, 0xdb, 0x0a, 0xef, 0xbf, 0xb3, 0x98, 0x15,
+ 0xde, 0x21, 0xfa, 0x13, 0xbe, 0x05, 0x43, 0x91, 0x68, 0x4d, 0x74, 0x3d, 0xcf, 0x62, 0x48, 0x76,
+ 0x8a, 0x59, 0x22, 0xaa, 0x77, 0x98, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0x5f, 0xf3, 0x19, 0x90, 0x90,
+ 0x43, 0x50, 0x01, 0x2f, 0x9a, 0x2a, 0xe0, 0x52, 0x97, 0x2f, 0xc8, 0x51, 0x05, 0xff, 0x57, 0x66,
+ 0xbf, 0x99, 0xfc, 0xf1, 0xfd, 0x6e, 0xfe, 0x69, 0x7f, 0xd1, 0x02, 0x88, 0xd3, 0xbe, 0xf4, 0x90,
+ 0xc0, 0xfb, 0x12, 0x7d, 0x79, 0xf9, 0x91, 0x5f, 0xf3, 0x1b, 0x42, 0x05, 0x75, 0x2a, 0xd6, 0x42,
+ 0xf3, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0xe3, 0x30, 0x17, 0x63, 0xbb, 0x08, 0x23,
+ 0x06, 0xf3, 0x57, 0x2c, 0x38, 0x9a, 0xe5, 0x9c, 0x42, 0xdf, 0xf1, 0x5c, 0x12, 0xab, 0x4c, 0x73,
+ 0xd5, 0x6c, 0xde, 0x14, 0xe5, 0x58, 0x61, 0xf4, 0x9c, 0x19, 0xfd, 0x60, 0x29, 0x49, 0xae, 0xc3,
+ 0x58, 0x25, 0x20, 0x1a, 0x7f, 0xf1, 0x7a, 0x9c, 0x2d, 0x69, 0x78, 0xfe, 0xd9, 0x03, 0x47, 0x7c,
+ 0xb2, 0xbf, 0x5a, 0x80, 0xa3, 0xdc, 0xc0, 0x6c, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf, 0x2e, 0x5c,
+ 0x8a, 0xdf, 0x86, 0xd1, 0x96, 0x26, 0x3e, 0xef, 0x14, 0x5e, 0x5f, 0x17, 0xb3, 0xc7, 0x02, 0x3f,
+ 0xbd, 0x14, 0x1b, 0xb4, 0x50, 0x1d, 0x46, 0xc9, 0xb6, 0x5b, 0x53, 0x96, 0x45, 0x85, 0x03, 0x5f,
+ 0xd2, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0xf6, 0x6c, 0x16, 0xae, 0xb1, 0x68, 0x7d, 0x5d,
+ 0xac, 0x89, 0x7e, 0xce, 0x82, 0x13, 0x39, 0xc1, 0xf8, 0x69, 0x73, 0x77, 0x98, 0x29, 0x9f, 0x58,
+ 0xb6, 0xaa, 0x39, 0x6e, 0xe0, 0x87, 0x05, 0x14, 0x7d, 0x1c, 0xa0, 0x15, 0xa7, 0x30, 0xed, 0x12,
+ 0xb5, 0xdc, 0x88, 0x5f, 0xac, 0x85, 0xa2, 0x55, 0x99, 0x4e, 0x35, 0x5a, 0xf6, 0x57, 0xfa, 0xa0,
+ 0x9f, 0x19, 0x71, 0xa1, 0x0a, 0x0c, 0x6e, 0xf2, 0x48, 0x89, 0x1d, 0xe7, 0x8d, 0xe2, 0xca, 0xd0,
+ 0x8b, 0xf1, 0xbc, 0x69, 0xa5, 0x58, 0x92, 0x41, 0x2b, 0x70, 0x84, 0xa7, 0x67, 0x6d, 0x2c, 0x92,
+ 0x86, 0xb3, 0x23, 0x25, 0xd3, 0x05, 0xf6, 0xa9, 0x4a, 0x42, 0x5f, 0x4e, 0xa3, 0xe0, 0xac, 0x7a,
+ 0xe8, 0x75, 0x18, 0x8f, 0xdc, 0x26, 0xf1, 0xdb, 0x91, 0xa4, 0xc4, 0xf3, 0xa1, 0xaa, 0xc7, 0xd3,
+ 0x9a, 0x01, 0xc5, 0x09, 0x6c, 0xf4, 0x2a, 0x8c, 0xb5, 0x52, 0x32, 0xf8, 0xfe, 0x58, 0x58, 0x65,
+ 0xca, 0xdd, 0x4d, 0x5c, 0xe6, 0x9f, 0xd2, 0x66, 0xde, 0x38, 0x6b, 0x9b, 0x01, 0x09, 0x37, 0xfd,
+ 0x46, 0x9d, 0x71, 0xc0, 0xfd, 0x9a, 0x7f, 0x4a, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, 0x75, 0xc7,
+ 0x6d, 0xb4, 0x03, 0x12, 0x53, 0x19, 0x30, 0xa9, 0x2c, 0x27, 0xe0, 0x38, 0x55, 0xa3, 0xbb, 0x72,
+ 0x61, 0xf0, 0xc1, 0x28, 0x17, 0xec, 0x5f, 0x2d, 0x80, 0x31, 0xb5, 0xdf, 0xc3, 0xd9, 0x56, 0x5f,
+ 0x83, 0xbe, 0x8d, 0xa0, 0x55, 0x13, 0x06, 0x8b, 0x99, 0x5f, 0x76, 0x19, 0x57, 0x16, 0xf4, 0x2f,
+ 0xa3, 0xff, 0x31, 0xab, 0x45, 0xf7, 0xf8, 0xb1, 0x4a, 0xe0, 0xd3, 0x4b, 0x4e, 0x06, 0x53, 0x55,
+ 0x6e, 0x60, 0x83, 0xf2, 0xbd, 0xde, 0x21, 0xec, 0xb8, 0xf0, 0x65, 0xe1, 0x14, 0x0c, 0xdb, 0xbe,
+ 0xaa, 0x78, 0xad, 0x4b, 0x2a, 0xe8, 0x22, 0x8c, 0x88, 0x04, 0x98, 0xcc, 0x5b, 0x89, 0x6f, 0x26,
+ 0x66, 0x8b, 0xb8, 0x18, 0x17, 0x63, 0x1d, 0xc7, 0xfe, 0xf1, 0x02, 0x1c, 0xc9, 0x70, 0x37, 0xe5,
+ 0xd7, 0xc8, 0x86, 0x1b, 0x46, 0xc1, 0x4e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0x7a, 0x56,
+ 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0x77, 0x2e, 0x01, 0x3d, 0xd8, 0xe5, 0x44, 0xaf, 0xed, 0x76,
+ 0x48, 0x64, 0x86, 0x03, 0x75, 0x6d, 0x33, 0xc3, 0x05, 0x06, 0xa1, 0x4f, 0xc0, 0x0d, 0xa5, 0x8d,
+ 0xd7, 0x9e, 0x80, 0x5c, 0x1f, 0xcf, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, 0x12, 0x0f, 0xc5,
+ 0x38, 0xf2, 0x35, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0x27, 0x73, 0x1d, 0xd0, 0x69, 0xd7,
+ 0x9b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xf2, 0xe4, 0xd1, 0xae, 0x49, 0x6b, 0x73, 0x45, 0x94, 0x63,
+ 0x85, 0x81, 0xce, 0x41, 0x3f, 0x93, 0xdb, 0x27, 0x93, 0xdf, 0xe1, 0xf9, 0x45, 0x1e, 0x0b, 0x94,
+ 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf1, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x23, 0x79, 0xa1, 0xd0,
+ 0xee, 0xfa, 0x7e, 0x03, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0xaa, 0x11, 0x3b, 0x75, 0x3f,
+ 0xd4, 0x06, 0xed, 0x29, 0x18, 0xdc, 0x22, 0x3b, 0x81, 0xeb, 0x6d, 0x24, 0xad, 0x5d, 0xaf, 0xf2,
+ 0x62, 0x2c, 0xe1, 0x66, 0x96, 0xf8, 0xc1, 0x07, 0x91, 0x25, 0x5e, 0x5f, 0x01, 0x43, 0x5d, 0xd9,
+ 0x93, 0x9f, 0x28, 0xc2, 0x04, 0x9e, 0x5f, 0xfc, 0x60, 0x22, 0x6e, 0xa4, 0x27, 0xe2, 0x41, 0x24,
+ 0x53, 0x3f, 0xd8, 0x6c, 0xfc, 0x9e, 0x05, 0x13, 0x2c, 0x0d, 0xa7, 0x88, 0x1e, 0xe3, 0xfa, 0xde,
+ 0x21, 0x3c, 0x05, 0x1e, 0x83, 0xfe, 0x80, 0x36, 0x2a, 0x66, 0x50, 0xed, 0x71, 0xd6, 0x13, 0xcc,
+ 0x61, 0xe8, 0x14, 0xf4, 0xb1, 0x2e, 0xd0, 0xc9, 0x1b, 0xe5, 0x47, 0xf0, 0xa2, 0x13, 0x39, 0x98,
+ 0x95, 0xb2, 0x38, 0x96, 0x98, 0xb4, 0x1a, 0x2e, 0xef, 0x74, 0x6c, 0x55, 0xf1, 0xfe, 0x08, 0x4d,
+ 0x93, 0xd9, 0xb5, 0xf7, 0x16, 0xc7, 0x32, 0x9b, 0x64, 0xe7, 0x67, 0xf6, 0x3f, 0x15, 0xe0, 0x4c,
+ 0x66, 0xbd, 0x9e, 0xe3, 0x58, 0x76, 0xae, 0xfd, 0x30, 0x93, 0xf6, 0x15, 0x0f, 0xd1, 0x97, 0xa0,
+ 0xaf, 0x57, 0xee, 0xbf, 0xbf, 0x87, 0xf0, 0x92, 0x99, 0x43, 0xf6, 0x3e, 0x09, 0x2f, 0x99, 0xd9,
+ 0xb7, 0x1c, 0x31, 0xc1, 0xb7, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0xe7, 0xe9, 0x39, 0xc3, 0x80,
+ 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0x73, 0x30, 0xd1, 0x74, 0x3d, 0x7a,
+ 0xf8, 0xec, 0x98, 0xac, 0xb8, 0x52, 0xb7, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, 0xae, 0x16, 0x7a,
+ 0x92, 0x7f, 0xdd, 0xab, 0x07, 0xda, 0x75, 0xb3, 0xa6, 0xc5, 0x89, 0x1a, 0xc5, 0x8c, 0x30, 0x94,
+ 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbb, 0x9c, 0x68, 0x34, 0x5b, 0x46, 0x34, 0xf3, 0x2a, 0x8c, 0xdd,
+ 0xb7, 0x9e, 0xc5, 0xfe, 0x66, 0x11, 0x1e, 0xe9, 0xb0, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76,
+ 0xd6, 0xa7, 0xe6, 0xa1, 0x02, 0x47, 0xd7, 0xdb, 0x8d, 0xc6, 0x0e, 0x73, 0xc0, 0x23, 0x75, 0x89,
+ 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xba, 0x9c, 0x81, 0x83, 0x33, 0x6b, 0xd2, 0x27, 0x16, 0xbd,
+ 0x49, 0x76, 0x14, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xe5, 0x6c,
+ 0x3b, 0x2e, 0x4f, 0x7a, 0x22, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x25, 0x11, 0x70, 0xba,
+ 0x4e, 0x8e, 0x4a, 0xa8, 0x78, 0x5f, 0x2a, 0x21, 0x33, 0x08, 0xe2, 0x40, 0x7e, 0x10, 0xc4, 0xce,
+ 0xe7, 0x62, 0xd7, 0x7c, 0x91, 0xef, 0xc0, 0xd8, 0x41, 0x2d, 0xc7, 0x9f, 0x82, 0xc1, 0x40, 0x64,
+ 0xe2, 0x4f, 0x78, 0xbb, 0xcb, 0x3c, 0xe5, 0x12, 0x6e, 0xff, 0x6f, 0x16, 0x28, 0x59, 0xb2, 0x19,
+ 0xef, 0xfc, 0x55, 0x66, 0x06, 0xcf, 0xa5, 0xe0, 0x5a, 0x88, 0xb3, 0x63, 0x9a, 0x19, 0x7c, 0x0c,
+ 0xc4, 0x26, 0x2e, 0x5f, 0x6e, 0x61, 0x1c, 0x59, 0xc3, 0x78, 0x40, 0x08, 0x0d, 0xa4, 0xc2, 0x40,
+ 0x9f, 0x80, 0xc1, 0xba, 0xbb, 0xed, 0x86, 0x42, 0x8e, 0x76, 0x60, 0x1d, 0x60, 0xfc, 0x7d, 0x8b,
+ 0x9c, 0x0c, 0x96, 0xf4, 0xec, 0x9f, 0xb2, 0x40, 0xa9, 0x4e, 0xaf, 0x10, 0xa7, 0x11, 0x6d, 0xa2,
+ 0x37, 0x00, 0x24, 0x05, 0x25, 0x7b, 0x93, 0x06, 0x5d, 0x80, 0x15, 0x64, 0xdf, 0xf8, 0x87, 0xb5,
+ 0x3a, 0xe8, 0x75, 0x18, 0xd8, 0x64, 0xb4, 0xc4, 0xb7, 0x9d, 0x53, 0xaa, 0x2e, 0x56, 0xba, 0xbf,
+ 0x5b, 0x3a, 0x6a, 0xb6, 0x29, 0x6f, 0x31, 0x5e, 0xcb, 0xfe, 0x89, 0x42, 0x3c, 0xa7, 0x6f, 0xb5,
+ 0xfd, 0xc8, 0x39, 0x04, 0x4e, 0xe4, 0xb2, 0xc1, 0x89, 0x3c, 0xd1, 0x49, 0x37, 0xcc, 0xba, 0x94,
+ 0xcb, 0x81, 0x5c, 0x4f, 0x70, 0x20, 0x4f, 0x76, 0x27, 0xd5, 0x99, 0xf3, 0xf8, 0x6f, 0x2c, 0x98,
+ 0x32, 0xf0, 0x0f, 0xe1, 0x02, 0x5c, 0x36, 0x2f, 0xc0, 0x47, 0xbb, 0x7e, 0x43, 0xce, 0xc5, 0xf7,
+ 0xa3, 0xc5, 0x44, 0xdf, 0xd9, 0x85, 0xf7, 0x2e, 0xf4, 0x6d, 0x3a, 0x41, 0x5d, 0xbc, 0xeb, 0x2f,
+ 0xf4, 0x34, 0xd6, 0xb3, 0x57, 0x9c, 0x40, 0x18, 0x83, 0x3c, 0x2b, 0x47, 0x9d, 0x16, 0x75, 0x35,
+ 0x04, 0x61, 0x4d, 0xa1, 0x4b, 0x30, 0x10, 0xd6, 0xfc, 0x96, 0xf2, 0x29, 0x64, 0x49, 0xd4, 0xab,
+ 0xac, 0x64, 0x7f, 0xb7, 0x84, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x6d, 0x18, 0x63, 0xbf,
+ 0x94, 0x65, 0x66, 0x31, 0x5f, 0x02, 0x53, 0xd5, 0x11, 0xb9, 0xd9, 0xb2, 0x51, 0x84, 0x4d, 0x52,
+ 0x33, 0x1b, 0x30, 0xac, 0x3e, 0xeb, 0xa1, 0x6a, 0xfe, 0xff, 0xba, 0x08, 0x47, 0x32, 0xd6, 0x1c,
+ 0x0a, 0x8d, 0x99, 0xb8, 0xd8, 0xe3, 0x52, 0x7d, 0x8f, 0x73, 0x11, 0xb2, 0x07, 0x60, 0x5d, 0xac,
+ 0xad, 0x9e, 0x1b, 0xbd, 0x11, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7b, 0xa3, 0xb4, 0xb1, 0x43, 0x1b,
+ 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0xe9, 0x83, 0xa3, 0x59, 0xe6, 0x2a, 0xe8,
+ 0x73, 0x30, 0xc0, 0x9c, 0xde, 0xa4, 0xe0, 0xec, 0xc5, 0x5e, 0x0d, 0x5d, 0x66, 0x99, 0xdf, 0x9c,
+ 0x08, 0x99, 0x3b, 0x2b, 0x8f, 0x23, 0x5e, 0xd8, 0x75, 0x98, 0x45, 0x9b, 0x2c, 0x94, 0x95, 0xb8,
+ 0x3d, 0xe5, 0xf1, 0xf1, 0x91, 0x9e, 0x3b, 0x20, 0xee, 0xdf, 0x30, 0x61, 0xf5, 0x25, 0x8b, 0xbb,
+ 0x5b, 0x7d, 0xc9, 0x96, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0x9c, 0xa8, 0xd8, 0xfd, 0x08, 0xe3, 0xb6,
+ 0x44, 0xea, 0x00, 0x16, 0x36, 0x44, 0x82, 0xc0, 0x8c, 0x0b, 0x23, 0xda, 0xc0, 0x3c, 0xd4, 0xc5,
+ 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xea, 0x02, 0xfa, 0x59, 0xed, 0xee, 0x17, 0xe7, 0xc1,
+ 0x87, 0x0d, 0xde, 0xe9, 0x54, 0xc2, 0x15, 0x31, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x35, 0x63, 0xcd,
+ 0xe7, 0x26, 0xcc, 0x32, 0x2f, 0xfc, 0xce, 0xf1, 0xe5, 0xed, 0x9f, 0xb3, 0x20, 0xe1, 0x2c, 0xa6,
+ 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x2c, 0xf4, 0x05, 0x7e, 0x43, 0xf2, 0x53, 0x0a, 0x03, 0xfb,
+ 0x0d, 0x82, 0x19, 0x84, 0x62, 0x44, 0xb1, 0x10, 0x6b, 0x54, 0x7f, 0xa0, 0x8b, 0xa7, 0xf7, 0x63,
+ 0xd0, 0xdf, 0x20, 0xdb, 0xa4, 0x91, 0xcc, 0x1b, 0x7b, 0x8d, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0xaf,
+ 0x0f, 0x4e, 0x77, 0x8c, 0x78, 0x47, 0x19, 0xcc, 0x0d, 0x27, 0x22, 0x77, 0x9c, 0x9d, 0x64, 0xbe,
+ 0xc4, 0xcb, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0xdc, 0xe6, 0x39, 0x80, 0x12, 0xc2, 0x61, 0x91, 0xfa,
+ 0x47, 0x40, 0x4d, 0x61, 0x63, 0xf1, 0x41, 0x08, 0x1b, 0x9f, 0x07, 0x08, 0xc3, 0x06, 0xb7, 0x09,
+ 0xad, 0x0b, 0x8f, 0xf0, 0x38, 0x57, 0x54, 0xf5, 0x9a, 0x80, 0x60, 0x0d, 0x0b, 0x2d, 0xc2, 0x64,
+ 0x2b, 0xf0, 0x23, 0x2e, 0x6b, 0x5f, 0xe4, 0x66, 0xd3, 0xfd, 0x66, 0xb0, 0xb1, 0x4a, 0x02, 0x8e,
+ 0x53, 0x35, 0xd0, 0x4b, 0x30, 0x22, 0x02, 0x90, 0x55, 0x7c, 0xbf, 0x21, 0xc4, 0x7b, 0xca, 0x92,
+ 0xb8, 0x1a, 0x83, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x02, 0xfc, 0xc1, 0xcc, 0x6a, 0x5c, 0x88, 0xaf,
+ 0xe1, 0x25, 0x92, 0x15, 0x0c, 0xf5, 0x94, 0xac, 0x20, 0x16, 0x78, 0x0e, 0xf7, 0xac, 0x4f, 0x86,
+ 0xae, 0x22, 0xc2, 0xaf, 0xf5, 0xc1, 0x11, 0xb1, 0x70, 0x1e, 0xf6, 0x72, 0xb9, 0x91, 0x5e, 0x2e,
+ 0x0f, 0x42, 0x24, 0xfa, 0xc1, 0x9a, 0x39, 0xec, 0x35, 0xf3, 0x93, 0x16, 0x98, 0x3c, 0x24, 0xfa,
+ 0x8f, 0x72, 0x13, 0xce, 0xbe, 0x94, 0xcb, 0x93, 0xc6, 0x91, 0xcc, 0xdf, 0x5b, 0xea, 0x59, 0xfb,
+ 0x7f, 0xb1, 0xe0, 0xd1, 0xae, 0x14, 0xd1, 0x12, 0x0c, 0x33, 0x46, 0x57, 0x7b, 0x17, 0x3f, 0xa9,
+ 0xdc, 0x2a, 0x24, 0x20, 0x87, 0xef, 0x8e, 0x6b, 0xa2, 0xa5, 0x54, 0x66, 0xdf, 0xa7, 0x32, 0x32,
+ 0xfb, 0x1e, 0x33, 0x86, 0xe7, 0x3e, 0x53, 0xfb, 0x7e, 0x89, 0xde, 0x38, 0xa6, 0x6f, 0xe6, 0x47,
+ 0x0c, 0x71, 0xae, 0x9d, 0x10, 0xe7, 0x22, 0x13, 0x5b, 0xbb, 0x43, 0xde, 0x80, 0x49, 0x16, 0x99,
+ 0x94, 0x39, 0xf9, 0x08, 0xa7, 0xce, 0x42, 0x6c, 0xc8, 0x7f, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb,
+ 0x1f, 0x8a, 0x30, 0xc0, 0xb7, 0xdf, 0x21, 0x3c, 0x7c, 0x9f, 0x81, 0x61, 0xb7, 0xd9, 0x6c, 0xf3,
+ 0x64, 0xad, 0xfd, 0xb1, 0x59, 0x78, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0x96, 0x85, 0x26, 0xa1, 0x43,
+ 0xf0, 0x73, 0xde, 0xf1, 0xd9, 0x45, 0x27, 0x72, 0x38, 0x17, 0xa7, 0xee, 0xd9, 0x58, 0xe7, 0x80,
+ 0x3e, 0x0d, 0x10, 0x46, 0x81, 0xeb, 0x6d, 0xd0, 0x32, 0x91, 0x21, 0xe3, 0xe9, 0x0e, 0xd4, 0xaa,
+ 0x0a, 0x99, 0xd3, 0x8c, 0xcf, 0x1c, 0x05, 0xc0, 0x1a, 0x45, 0x34, 0x6b, 0xdc, 0xf4, 0x33, 0x89,
+ 0xb9, 0x03, 0x4e, 0x35, 0x9e, 0xb3, 0x99, 0x97, 0x61, 0x58, 0x11, 0xef, 0x26, 0x57, 0x1c, 0xd5,
+ 0x19, 0xb6, 0x8f, 0xc1, 0x44, 0xa2, 0x6f, 0x07, 0x12, 0x4b, 0xfe, 0xbe, 0x05, 0x13, 0xbc, 0x33,
+ 0x4b, 0xde, 0xb6, 0xb8, 0x0d, 0xee, 0xc1, 0xd1, 0x46, 0xc6, 0xa9, 0x2c, 0xa6, 0xbf, 0xf7, 0x53,
+ 0x5c, 0x89, 0x21, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x74, 0x9e, 0xee, 0x38, 0x7a, 0xea, 0x3a, 0x0d,
+ 0x11, 0x99, 0x64, 0x94, 0xef, 0x36, 0x5e, 0x86, 0x15, 0xd4, 0xfe, 0x5b, 0x0b, 0xa6, 0x78, 0xcf,
+ 0xaf, 0x92, 0x1d, 0x75, 0x36, 0x7d, 0x27, 0xfb, 0x2e, 0xd2, 0x84, 0x17, 0x72, 0xd2, 0x84, 0xeb,
+ 0x9f, 0x56, 0xec, 0xf8, 0x69, 0x5f, 0xb5, 0x40, 0xac, 0x90, 0x43, 0x90, 0xb4, 0x7c, 0x9f, 0x29,
+ 0x69, 0x99, 0xc9, 0xdf, 0x04, 0x39, 0x22, 0x96, 0x7f, 0xb3, 0x60, 0x92, 0x23, 0xc4, 0x56, 0x10,
+ 0xdf, 0xd1, 0x79, 0x98, 0x37, 0xbf, 0x28, 0xd3, 0xac, 0xf5, 0x2a, 0xd9, 0x59, 0xf3, 0x2b, 0x4e,
+ 0xb4, 0x99, 0xfd, 0x51, 0xc6, 0x64, 0xf5, 0x75, 0x9c, 0xac, 0xba, 0xdc, 0x40, 0x46, 0x42, 0xc8,
+ 0x2e, 0x02, 0xe0, 0x83, 0x26, 0x84, 0xb4, 0xff, 0xd1, 0x02, 0xc4, 0x9b, 0x31, 0x18, 0x37, 0xca,
+ 0x0e, 0xb1, 0x52, 0xed, 0xa2, 0x8b, 0x8f, 0x26, 0x05, 0xc1, 0x1a, 0xd6, 0x03, 0x19, 0x9e, 0x84,
+ 0x29, 0x4b, 0xb1, 0xbb, 0x29, 0xcb, 0x01, 0x46, 0xf4, 0xab, 0x83, 0x90, 0x74, 0xeb, 0x44, 0x37,
+ 0x61, 0xb4, 0xe6, 0xb4, 0x9c, 0xdb, 0x6e, 0xc3, 0x8d, 0x5c, 0x12, 0x76, 0xb2, 0x73, 0x5b, 0xd0,
+ 0xf0, 0x84, 0xf1, 0x81, 0x56, 0x82, 0x0d, 0x3a, 0x68, 0x16, 0xa0, 0x15, 0xb8, 0xdb, 0x6e, 0x83,
+ 0x6c, 0x30, 0x81, 0x10, 0x8b, 0x85, 0xc4, 0x8d, 0xee, 0x64, 0x29, 0xd6, 0x30, 0x32, 0x42, 0x90,
+ 0x14, 0x1f, 0x72, 0x08, 0x12, 0x38, 0xb4, 0x10, 0x24, 0x7d, 0x07, 0x0a, 0x41, 0x32, 0x74, 0xe0,
+ 0x10, 0x24, 0xfd, 0x3d, 0x85, 0x20, 0xc1, 0x70, 0x5c, 0xf2, 0x9e, 0xf4, 0xff, 0xb2, 0xdb, 0x20,
+ 0xe2, 0xc1, 0xc1, 0x03, 0x38, 0xcd, 0xec, 0xed, 0x96, 0x8e, 0xe3, 0x4c, 0x0c, 0x9c, 0x53, 0x13,
+ 0x7d, 0x1c, 0xa6, 0x9d, 0x46, 0xc3, 0xbf, 0xa3, 0x26, 0x75, 0x29, 0xac, 0x39, 0x8d, 0x38, 0xae,
+ 0xdf, 0xd0, 0xfc, 0xa9, 0xbd, 0xdd, 0xd2, 0xf4, 0x5c, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x83,
+ 0xe1, 0x56, 0xe0, 0xd7, 0x56, 0x34, 0xdf, 0xf3, 0x33, 0x74, 0x00, 0x2b, 0xb2, 0x70, 0x7f, 0xb7,
+ 0x34, 0xa6, 0xfe, 0xb0, 0x0b, 0x3f, 0xae, 0x90, 0x11, 0xdd, 0x63, 0xe4, 0x61, 0x47, 0xf7, 0x18,
+ 0x7d, 0xc0, 0xd1, 0x3d, 0xec, 0x2d, 0x38, 0x52, 0x25, 0x81, 0xeb, 0x34, 0xdc, 0x7b, 0x94, 0x27,
+ 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x07, 0x89, 0x53, 0xbf, 0xa7, 0xa0, 0xe7, 0x9a, 0x5c, 0x46, 0x9e,
+ 0xf2, 0x31, 0x21, 0xfb, 0xff, 0xb7, 0x60, 0x50, 0xb8, 0x8a, 0x1e, 0x02, 0x67, 0x3a, 0x67, 0xa8,
+ 0x64, 0x4a, 0xd9, 0x93, 0xc2, 0x3a, 0x93, 0xab, 0x8c, 0x29, 0x27, 0x94, 0x31, 0x8f, 0x76, 0x22,
+ 0xd2, 0x59, 0x0d, 0xf3, 0x9f, 0x15, 0xe9, 0x0b, 0xc1, 0x08, 0x5a, 0xf0, 0xf0, 0x87, 0x60, 0x15,
+ 0x06, 0x43, 0xe1, 0x34, 0x5f, 0xc8, 0xf7, 0xe5, 0x49, 0x4e, 0x62, 0x6c, 0x03, 0x29, 0xdc, 0xe4,
+ 0x25, 0x91, 0x4c, 0x6f, 0xfc, 0xe2, 0x43, 0xf4, 0xc6, 0xef, 0x16, 0xd6, 0xa1, 0xef, 0x41, 0x84,
+ 0x75, 0xb0, 0xbf, 0xce, 0x6e, 0x67, 0xbd, 0xfc, 0x10, 0x18, 0xb7, 0xcb, 0xe6, 0x3d, 0x6e, 0x77,
+ 0x58, 0x59, 0xa2, 0x53, 0x39, 0x0c, 0xdc, 0xef, 0x5a, 0x70, 0x3a, 0xe3, 0xab, 0x34, 0x6e, 0xee,
+ 0x59, 0x18, 0x72, 0xda, 0x75, 0x57, 0xed, 0x65, 0x4d, 0x5b, 0x3c, 0x27, 0xca, 0xb1, 0xc2, 0x40,
+ 0x0b, 0x30, 0x45, 0xee, 0xb6, 0x5c, 0xae, 0x86, 0xd7, 0x4d, 0xc7, 0x8b, 0xdc, 0xbf, 0x78, 0x29,
+ 0x09, 0xc4, 0x69, 0x7c, 0x15, 0x1a, 0xae, 0x98, 0x1b, 0x1a, 0xee, 0x37, 0x2d, 0x18, 0x51, 0x6e,
+ 0xe3, 0x0f, 0x7d, 0xb4, 0xdf, 0x30, 0x47, 0xfb, 0x91, 0x0e, 0xa3, 0x9d, 0x33, 0xcc, 0x7f, 0x53,
+ 0x50, 0xfd, 0xad, 0xf8, 0x41, 0xd4, 0x03, 0x97, 0x78, 0xff, 0x6e, 0x2f, 0x17, 0x61, 0xc4, 0x69,
+ 0xb5, 0x24, 0x40, 0xda, 0x2f, 0xb2, 0x14, 0x16, 0x71, 0x31, 0xd6, 0x71, 0x94, 0x17, 0x4e, 0x31,
+ 0xd7, 0x0b, 0xa7, 0x0e, 0x10, 0x39, 0xc1, 0x06, 0x89, 0x68, 0x99, 0x30, 0xb7, 0xce, 0x3f, 0x6f,
+ 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0xb2, 0x17, 0x5d, 0x0f, 0xf8, 0x33,
+ 0x55, 0x0b, 0xc0, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x0c, 0x91, 0xc2, 0xda, 0xe8, 0x37, 0x0d, 0x61,
+ 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0xcb, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0xc0, 0x82, 0xff,
+ 0x34, 0xaa, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf5, 0xf0, 0x85, 0x9d, 0x0f, 0x7b, 0xda, 0xb0, 0xee,
+ 0xcf, 0x1a, 0xc7, 0x38, 0x44, 0x9f, 0x4c, 0x19, 0x37, 0x3d, 0xd7, 0xe5, 0xd6, 0x38, 0x80, 0x39,
+ 0x13, 0xcb, 0x67, 0xc7, 0xb2, 0x7d, 0x95, 0x2b, 0x62, 0x5f, 0x68, 0xf9, 0xec, 0x04, 0x00, 0xc7,
+ 0x38, 0x94, 0x61, 0x53, 0x7f, 0xc2, 0x69, 0x14, 0x87, 0x3d, 0x57, 0xd8, 0x21, 0xd6, 0x30, 0xd0,
+ 0x05, 0x21, 0xb4, 0xe0, 0xba, 0x87, 0x47, 0x12, 0x42, 0x0b, 0x39, 0x5c, 0x9a, 0xa4, 0xe9, 0x22,
+ 0x8c, 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0x41, 0x5b, 0xe8, 0x8f, 0xa3, 0xeb, 0x2e, 0xc5, 0xc5,
+ 0x58, 0xc7, 0x41, 0x6b, 0x30, 0x11, 0x72, 0x59, 0x9e, 0x4a, 0xb6, 0xc1, 0x65, 0xa2, 0x4f, 0x2b,
+ 0x87, 0x7d, 0x13, 0xbc, 0xcf, 0x8a, 0xf8, 0xe9, 0x24, 0xc3, 0x98, 0x24, 0x49, 0xa0, 0xd7, 0x61,
+ 0xbc, 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0x36, 0x3e, 0x43, 0x46, 0x2c, 0xcb, 0xf1,
+ 0x6b, 0x06, 0x14, 0x27, 0xb0, 0x29, 0x83, 0xa8, 0x97, 0x88, 0x04, 0x31, 0x8e, 0xb7, 0x41, 0xc2,
+ 0xe9, 0x61, 0xf6, 0x55, 0x8c, 0x41, 0xbc, 0x96, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x4b, 0x30, 0x2a,
+ 0x3f, 0x5f, 0x8b, 0xfa, 0x13, 0x3b, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x8e, 0xc9, 0xff,
+ 0x6b, 0x81, 0xb3, 0xbe, 0xee, 0xd6, 0x44, 0x28, 0x0c, 0xee, 0xfc, 0xfd, 0x31, 0xe9, 0x69, 0xba,
+ 0x94, 0x85, 0xb4, 0xbf, 0x5b, 0x3a, 0x25, 0x46, 0x2d, 0x13, 0x8e, 0xb3, 0x69, 0xa3, 0x15, 0x38,
+ 0xc2, 0x6d, 0x60, 0x16, 0x36, 0x49, 0x6d, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x57,
+ 0xd2, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x07, 0xa6, 0x5b, 0xed, 0xdb, 0x0d, 0x37, 0xdc, 0x5c, 0xf5,
+ 0x23, 0x66, 0x42, 0x36, 0x57, 0xaf, 0x07, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x91, 0x9a,
+ 0x2a, 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x07, 0xc7, 0x12, 0x0b, 0x41, 0x84, 0x5c, 0x19, 0xcf,
+ 0x4f, 0xb5, 0x55, 0xcd, 0xaa, 0x20, 0xa2, 0x17, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x15, 0x00,
+ 0xb7, 0xb5, 0xec, 0x34, 0xdd, 0x06, 0x7d, 0x8e, 0x1e, 0x61, 0x6b, 0x84, 0x3e, 0x4d, 0xa0, 0x5c,
+ 0x91, 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc1, 0xb8, 0xf8, 0xb7, 0x23,
+ 0xa6, 0x74, 0x4a, 0x65, 0x65, 0x1d, 0x97, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda,
+ 0x80, 0xd3, 0x32, 0x25, 0xac, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xbf, 0xd5, 0x10, 0xf7, 0x29,
+ 0x9a, 0xeb, 0x84, 0x88, 0x3b, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xb1, 0x38,
+ 0x22, 0xe8, 0xb5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0xc7, 0x5c, 0x2f, 0x6b, 0x55, 0x1f, 0x67,
+ 0x84, 0x3e, 0xca, 0x9d, 0xe5, 0x3b, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0xca, 0x70, 0x24,
+ 0xe2, 0x05, 0x8b, 0x6e, 0xc8, 0xd3, 0xe7, 0xd0, 0x67, 0xdf, 0x09, 0xd6, 0xdc, 0x09, 0xba, 0x9a,
+ 0xd7, 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0xbd, 0x19, 0x80, 0x7e, 0xc3, 0xa2, 0xb5, 0x35, 0x46, 0x1f,
+ 0x7d, 0x06, 0x46, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xb9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33,
+ 0x41, 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x2d, 0x23, 0xc8, 0xc5, 0x85, 0xde, 0x98, 0xa2,
+ 0xde, 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x0d, 0x86, 0x6a, 0x0d, 0x97, 0x78, 0x51, 0xb9,
+ 0xd2, 0x29, 0x50, 0xeb, 0x82, 0xc0, 0x11, 0x5b, 0x51, 0x64, 0xbd, 0xe2, 0x65, 0x58, 0x51, 0xb0,
+ 0x2f, 0xc1, 0x48, 0xb5, 0x41, 0x48, 0x8b, 0xfb, 0x71, 0xa1, 0xa7, 0xd8, 0xc3, 0x84, 0xb1, 0x96,
+ 0x16, 0x63, 0x2d, 0xf5, 0x37, 0x07, 0x63, 0x2a, 0x25, 0xdc, 0xfe, 0xb3, 0x02, 0x94, 0xba, 0x24,
+ 0x5f, 0x4b, 0xe8, 0xdb, 0xac, 0x9e, 0xf4, 0x6d, 0x73, 0x30, 0x11, 0xff, 0xd3, 0x45, 0x79, 0xca,
+ 0x18, 0xfa, 0xa6, 0x09, 0xc6, 0x49, 0xfc, 0x9e, 0xfd, 0x5a, 0x74, 0x95, 0x5d, 0x5f, 0x57, 0xcf,
+ 0x2c, 0x43, 0x55, 0xdf, 0xdf, 0xfb, 0xdb, 0x3b, 0x57, 0xed, 0x6a, 0x7f, 0xbd, 0x00, 0xc7, 0xd4,
+ 0x10, 0x7e, 0xef, 0x0e, 0xdc, 0x8d, 0xf4, 0xc0, 0x3d, 0x00, 0xa5, 0xb5, 0x7d, 0x1d, 0x06, 0x78,
+ 0xf4, 0xd8, 0x1e, 0x78, 0xfe, 0xc7, 0xcc, 0x40, 0xfe, 0x8a, 0xcd, 0x34, 0x82, 0xf9, 0xff, 0x98,
+ 0x05, 0x13, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0xfb, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0xb3,
+ 0xd0, 0xb7, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xc5, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0xdf, 0x59,
+ 0xd0, 0xbf, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x97, 0xef, 0x42, 0x2f,
+ 0xc1, 0x00, 0x59, 0x5f, 0x27, 0xb5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0x31, 0xb0, 0xc4, 0x4a, 0x29,
+ 0x13, 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0xb7, 0x60, 0x38, 0x72, 0x9b, 0x64, 0xae, 0x5e,
+ 0x17, 0x36, 0x01, 0xf7, 0x11, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0xcb, 0x05, 0x80,
+ 0x38, 0x5a, 0x5d, 0xb7, 0x4f, 0x9c, 0x4f, 0x69, 0x8b, 0xcf, 0x65, 0x68, 0x8b, 0x51, 0x4c, 0x30,
+ 0x43, 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd3, 0x30, 0xf5, 0x1d, 0x64, 0x98, 0x16, 0x60, 0x2a, 0x8e,
+ 0xb6, 0x67, 0x06, 0x1b, 0x65, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0xb3, 0x2a,
+ 0xe8, 0x98, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x32, 0x4e, 0xb1, 0x3a, 0xbc, 0x90,
+ 0xab, 0x0e, 0xff, 0x45, 0x0b, 0x8e, 0x26, 0xdb, 0x61, 0x7e, 0xf7, 0x5f, 0xb4, 0xe0, 0x58, 0x9c,
+ 0x7b, 0x28, 0x6d, 0x82, 0xf0, 0x62, 0xc7, 0x40, 0x6a, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64,
+ 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0x5f, 0x1f, 0x4c, 0xe7, 0x45, 0x60, 0x63, 0x9e, 0x46, 0xce,
+ 0xdd, 0xea, 0x16, 0xb9, 0x23, 0xfc, 0x39, 0x62, 0x4f, 0x23, 0x5e, 0x8c, 0x25, 0x3c, 0x99, 0x6e,
+ 0xaa, 0xd0, 0x63, 0xba, 0xa9, 0x4d, 0x98, 0xba, 0xb3, 0x49, 0xbc, 0x1b, 0x5e, 0xe8, 0x44, 0x6e,
+ 0xb8, 0xee, 0x32, 0x05, 0x3a, 0x5f, 0x37, 0xaf, 0x48, 0xaf, 0x8b, 0x5b, 0x49, 0x84, 0xfd, 0xdd,
+ 0xd2, 0x69, 0xa3, 0x20, 0xee, 0x32, 0x3f, 0x48, 0x70, 0x9a, 0x68, 0x3a, 0x5b, 0x57, 0xdf, 0x43,
+ 0xce, 0xd6, 0xd5, 0x74, 0x85, 0xd9, 0x8d, 0x74, 0x23, 0x61, 0xcf, 0xd6, 0x15, 0x55, 0x8a, 0x35,
+ 0x0c, 0xf4, 0x29, 0x40, 0x7a, 0xba, 0x45, 0x23, 0x00, 0xee, 0x73, 0x7b, 0xbb, 0x25, 0xb4, 0x9a,
+ 0x82, 0xee, 0xef, 0x96, 0x8e, 0xd0, 0xd2, 0xb2, 0x47, 0x9f, 0xbf, 0x71, 0xd4, 0xc0, 0x0c, 0x42,
+ 0xe8, 0x16, 0x4c, 0xd2, 0x52, 0xb6, 0xa3, 0x64, 0x74, 0x5d, 0xfe, 0x64, 0x7d, 0x66, 0x6f, 0xb7,
+ 0x34, 0xb9, 0x9a, 0x80, 0xe5, 0x91, 0x4e, 0x11, 0xc9, 0x48, 0xda, 0x35, 0xd4, 0x6b, 0xd2, 0x2e,
+ 0xfb, 0x8b, 0x16, 0x9c, 0xa4, 0x17, 0x5c, 0xfd, 0x5a, 0x8e, 0x16, 0xdd, 0x69, 0xb9, 0x5c, 0x4f,
+ 0x23, 0xae, 0x1a, 0x26, 0xab, 0xab, 0x94, 0xb9, 0x96, 0x46, 0x41, 0xe9, 0x09, 0xbf, 0xe5, 0x7a,
+ 0xf5, 0xe4, 0x09, 0x7f, 0xd5, 0xf5, 0xea, 0x98, 0x41, 0xd4, 0x95, 0x55, 0xcc, 0x8d, 0xd6, 0xff,
+ 0x35, 0xba, 0x57, 0x69, 0x5f, 0xbe, 0xa3, 0xdd, 0x40, 0xcf, 0xe8, 0x3a, 0x55, 0x61, 0x3e, 0x99,
+ 0xab, 0x4f, 0xfd, 0x82, 0x05, 0xc2, 0xfb, 0xbd, 0x87, 0x3b, 0xf9, 0x6d, 0x18, 0xdd, 0x4e, 0xa7,
+ 0xa2, 0x3d, 0x9b, 0x1f, 0x0e, 0x40, 0x24, 0xa0, 0x55, 0x2c, 0xba, 0x91, 0x76, 0xd6, 0xa0, 0x65,
+ 0xd7, 0x41, 0x40, 0x17, 0x09, 0xd3, 0x6a, 0x74, 0xef, 0xcd, 0xf3, 0x00, 0x75, 0x86, 0xcb, 0xf2,
+ 0xd3, 0x17, 0x4c, 0x8e, 0x6b, 0x51, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x7a, 0x11, 0x46, 0x64, 0xea,
+ 0xd3, 0xb6, 0xd7, 0x8b, 0xec, 0x51, 0x67, 0x9c, 0x0a, 0x5d, 0x19, 0xa7, 0x77, 0x60, 0x2a, 0x20,
+ 0xb5, 0x76, 0x10, 0xba, 0xdb, 0x44, 0x82, 0xc5, 0x26, 0x99, 0xe5, 0xc9, 0x22, 0x12, 0xc0, 0x7d,
+ 0x16, 0x22, 0x2b, 0x51, 0xc8, 0x94, 0xc6, 0x69, 0x42, 0xe8, 0x02, 0x0c, 0x33, 0xd1, 0x7b, 0x25,
+ 0x16, 0x08, 0x2b, 0xc1, 0xd7, 0x8a, 0x04, 0xe0, 0x18, 0x87, 0x3d, 0x0e, 0xda, 0xb7, 0x19, 0x7a,
+ 0xc2, 0x13, 0xbc, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0x8f, 0xc3, 0x24, 0xaf, 0x17, 0xf8, 0x2d, 0x67,
+ 0x83, 0xab, 0x04, 0xfb, 0x55, 0x78, 0x9d, 0xc9, 0x95, 0x04, 0x6c, 0x7f, 0xb7, 0x74, 0x34, 0x59,
+ 0xc6, 0xba, 0x9d, 0xa2, 0xc2, 0x2c, 0xff, 0x78, 0x23, 0xf4, 0xce, 0x48, 0x19, 0x0c, 0xc6, 0x20,
+ 0xac, 0xe3, 0xd9, 0xff, 0x6a, 0xc1, 0x94, 0x36, 0x55, 0x3d, 0xe7, 0xeb, 0x30, 0x06, 0xa9, 0xd0,
+ 0xc3, 0x20, 0x1d, 0x2c, 0xda, 0x43, 0xe6, 0x0c, 0xf7, 0x3d, 0xa0, 0x19, 0xb6, 0x3f, 0x03, 0x28,
+ 0x9d, 0x57, 0x17, 0xbd, 0xc9, 0x0d, 0xf9, 0xdd, 0x80, 0xd4, 0x3b, 0x29, 0xfc, 0xf5, 0xc8, 0x39,
+ 0xd2, 0x73, 0x95, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0xe3, 0x7d, 0x30, 0x99, 0x8c, 0xd5, 0x81, 0xae,
+ 0xc0, 0x00, 0xe7, 0xd2, 0x05, 0xf9, 0x0e, 0xf6, 0x64, 0x5a, 0x84, 0x0f, 0x9e, 0x4b, 0x87, 0x73,
+ 0xf7, 0xa2, 0x3e, 0x7a, 0x07, 0x46, 0xea, 0xfe, 0x1d, 0xef, 0x8e, 0x13, 0xd4, 0xe7, 0x2a, 0x65,
+ 0x71, 0x42, 0x64, 0x0a, 0xa0, 0x16, 0x63, 0x34, 0x3d, 0x6a, 0x08, 0xb3, 0x9d, 0x88, 0x41, 0x58,
+ 0x27, 0x87, 0xd6, 0x58, 0x7a, 0xa7, 0x75, 0x77, 0x63, 0xc5, 0x69, 0x75, 0xf2, 0xea, 0x5a, 0x90,
+ 0x48, 0x1a, 0xe5, 0x31, 0x91, 0x03, 0x8a, 0x03, 0x70, 0x4c, 0x08, 0x7d, 0x0e, 0x8e, 0x84, 0x39,
+ 0x2a, 0xb1, 0xbc, 0x34, 0xeb, 0x9d, 0xb4, 0x44, 0x5c, 0x98, 0x92, 0xa5, 0x3c, 0xcb, 0x6a, 0x06,
+ 0xdd, 0x05, 0x24, 0x44, 0xcf, 0x6b, 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0xd3, 0x3f,
+ 0x7d, 0x38, 0x5b, 0x4e, 0x90, 0xc4, 0xd6, 0xda, 0x66, 0xe1, 0x85, 0xd3, 0x18, 0x38, 0xa3, 0x0d,
+ 0xfb, 0x0b, 0x7d, 0x30, 0x23, 0x13, 0x59, 0x67, 0x78, 0xaf, 0x7c, 0xde, 0x4a, 0xb8, 0xaf, 0xbc,
+ 0x92, 0x7f, 0xd0, 0x3f, 0x34, 0x27, 0x96, 0x2f, 0xa5, 0x9d, 0x58, 0x5e, 0x3b, 0x60, 0x37, 0x1e,
+ 0x98, 0x2b, 0xcb, 0xf7, 0xac, 0xff, 0xc9, 0xde, 0x51, 0x30, 0xae, 0x66, 0x84, 0x79, 0xec, 0xf6,
+ 0x8a, 0x54, 0x1d, 0xe5, 0x3c, 0xff, 0xaf, 0x08, 0x1c, 0xe3, 0xb2, 0x1f, 0x95, 0x11, 0xde, 0xd9,
+ 0x39, 0xab, 0xe8, 0x50, 0x9a, 0xa4, 0xd9, 0x8a, 0x76, 0x16, 0xdd, 0x40, 0xf4, 0x38, 0x93, 0xe6,
+ 0x92, 0xc0, 0x49, 0xd3, 0x94, 0x10, 0xac, 0xe8, 0xa0, 0x6d, 0x98, 0xda, 0x60, 0x11, 0x9f, 0xb4,
+ 0x9c, 0xd2, 0xe2, 0x5c, 0xc8, 0xdc, 0xb7, 0x97, 0x17, 0x96, 0xf2, 0x13, 0x50, 0xf3, 0xc7, 0x5f,
+ 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa8, 0x73, 0x27, 0x5c, 0x6a, 0x38, 0x61, 0xe4, 0xd6,
+ 0xe6, 0x1b, 0x7e, 0x6d, 0xab, 0x1a, 0xf9, 0x81, 0x4c, 0x16, 0x99, 0xf9, 0xf6, 0x9a, 0xbb, 0x55,
+ 0x4d, 0xe1, 0x1b, 0xcd, 0x4f, 0xef, 0xed, 0x96, 0x8e, 0x66, 0x61, 0xe1, 0xcc, 0xb6, 0xd0, 0x2a,
+ 0x0c, 0x6e, 0xb8, 0x11, 0x26, 0x2d, 0x5f, 0x9c, 0x16, 0x99, 0x47, 0xe1, 0x65, 0x8e, 0x62, 0xb4,
+ 0xc4, 0x22, 0x52, 0x09, 0x00, 0x96, 0x44, 0xd0, 0x9b, 0xea, 0x12, 0x18, 0xc8, 0x17, 0xc0, 0xa6,
+ 0x6d, 0xef, 0x32, 0xaf, 0x81, 0xd7, 0xa1, 0xe8, 0xad, 0x87, 0x9d, 0x62, 0xf1, 0xac, 0x2e, 0x1b,
+ 0xf2, 0xb3, 0xf9, 0x41, 0xfa, 0x34, 0x5e, 0x5d, 0xae, 0x62, 0x5a, 0x91, 0xb9, 0xbd, 0x86, 0xb5,
+ 0xd0, 0x15, 0x89, 0xa7, 0x32, 0xbd, 0x80, 0xcb, 0xd5, 0x85, 0x6a, 0xd9, 0xa0, 0xc1, 0xa2, 0x1a,
+ 0xb2, 0x62, 0xcc, 0xab, 0xa3, 0x9b, 0x30, 0xbc, 0xc1, 0x0f, 0xbe, 0xf5, 0x50, 0x24, 0xb3, 0xcf,
+ 0xbc, 0x8c, 0x2e, 0x4b, 0x24, 0x83, 0x1e, 0xbb, 0x32, 0x14, 0x08, 0xc7, 0xa4, 0xd0, 0x17, 0x2c,
+ 0x38, 0xd6, 0x4a, 0x48, 0x50, 0x99, 0xb3, 0x9a, 0x30, 0x53, 0xcb, 0x74, 0x00, 0xa8, 0x64, 0x55,
+ 0x30, 0x1a, 0x64, 0xea, 0x97, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, 0x1d, 0xe8, 0xe0, 0x76, 0xbd, 0x53,
+ 0xae, 0xa2, 0x44, 0x60, 0x22, 0x3e, 0xd0, 0x78, 0x7e, 0x11, 0xd3, 0x8a, 0x68, 0x0d, 0x60, 0xbd,
+ 0x41, 0x44, 0xc4, 0x47, 0x61, 0x14, 0x95, 0x79, 0xfb, 0x2f, 0x2b, 0x2c, 0x41, 0x87, 0xbd, 0x44,
+ 0xe3, 0x52, 0xac, 0xd1, 0xa1, 0x4b, 0xa9, 0xe6, 0x7a, 0x75, 0x12, 0x30, 0xe5, 0x56, 0xce, 0x52,
+ 0x5a, 0x60, 0x18, 0xe9, 0xa5, 0xc4, 0xcb, 0xb1, 0xa0, 0xc0, 0x68, 0x91, 0xd6, 0xe6, 0x7a, 0xd8,
+ 0x29, 0x2b, 0xc6, 0x02, 0x69, 0x6d, 0x26, 0x16, 0x14, 0xa7, 0xc5, 0xca, 0xb1, 0xa0, 0x40, 0xb7,
+ 0xcc, 0x3a, 0xdd, 0x40, 0x24, 0x98, 0x9e, 0xc8, 0xdf, 0x32, 0xcb, 0x1c, 0x25, 0xbd, 0x65, 0x04,
+ 0x00, 0x4b, 0x22, 0xe8, 0xd3, 0x26, 0xb7, 0x33, 0xc9, 0x68, 0x3e, 0xd3, 0x85, 0xdb, 0x31, 0xe8,
+ 0x76, 0xe6, 0x77, 0x5e, 0x81, 0xc2, 0x7a, 0x8d, 0x29, 0xc5, 0x72, 0x74, 0x06, 0xcb, 0x0b, 0x06,
+ 0x35, 0x16, 0x65, 0x7e, 0x79, 0x01, 0x17, 0xd6, 0x6b, 0x74, 0xe9, 0x3b, 0xf7, 0xda, 0x01, 0x59,
+ 0x76, 0x1b, 0x44, 0x64, 0xc8, 0xc8, 0x5c, 0xfa, 0x73, 0x12, 0x29, 0xbd, 0xf4, 0x15, 0x08, 0xc7,
+ 0xa4, 0x28, 0xdd, 0x98, 0x07, 0x3b, 0x92, 0x4f, 0x57, 0xb1, 0x5a, 0x69, 0xba, 0x99, 0x5c, 0xd8,
+ 0x16, 0x8c, 0x6d, 0x87, 0xad, 0x4d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, 0x89, 0x54, 0x71, 0x53,
+ 0x20, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, 0xea, 0xc4, 0xb0, 0x49,
+ 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, 0x19, 0x11, 0xe7, 0xf8,
+ 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0xf1, 0x2e, 0x83, 0x9d, 0xea, 0x6f,
+ 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb5, 0xe9, 0x47, 0xbe, 0x97, 0xb8, 0xe4,
+ 0x4e, 0xe4, 0x5f, 0x34, 0x95, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, 0x33, 0xdb, 0xa2, 0x1f,
+ 0xd7, 0x92, 0x91, 0x01, 0x45, 0x16, 0x8f, 0xa7, 0x72, 0x02, 0x6b, 0xa6, 0xc3, 0x07, 0xf2, 0x8f,
+ 0x53, 0x20, 0x1c, 0x93, 0x42, 0x75, 0x18, 0x6f, 0x19, 0x11, 0x67, 0x59, 0x36, 0x92, 0x1c, 0xbe,
+ 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, 0xf7, 0xb8, 0xab, 0x1f,
+ 0x4b, 0x56, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, 0x4b, 0x22, 0x74, 0x34,
+ 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x9c, 0x3f, 0x79, 0x0a, 0xf6, 0x2c, 0x35, 0x91, 0x0c, 0xb3, 0x2e,
+ 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xca, 0x3f, 0xc9, 0x93, 0xd7, 0x1d, 0x3b,
+ 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0xf2, 0x95, 0xe4, 0xf4, 0x4b, 0x85,
+ 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, 0xdd, 0x99, 0x0e, 0x57,
+ 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, 0x4c, 0xe7, 0x7d, 0x1b,
+ 0xeb, 0xd0, 0x2a, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, 0x56, 0xcf, 0x01, 0x87,
+ 0x2f, 0xc3, 0x94, 0x72, 0x47, 0x6c, 0xb8, 0xb5, 0x1d, 0x2d, 0x49, 0xa9, 0x0a, 0xcd, 0x53, 0x4d,
+ 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x07, 0x13, 0x46, 0x61, 0x79, 0x51, 0x3c, 0xff, 0xe3, 0x4c, 0x1b,
+ 0x26, 0x18, 0x27, 0xf1, 0xed, 0xdf, 0xb0, 0xe0, 0x44, 0x4e, 0xfe, 0xfb, 0x9e, 0xe3, 0xe9, 0xae,
+ 0xc3, 0x44, 0xcb, 0xac, 0xda, 0x25, 0x04, 0xb8, 0x91, 0x65, 0x5f, 0xf5, 0x35, 0x01, 0xc0, 0x49,
+ 0xa2, 0xf6, 0xaf, 0x15, 0xe0, 0x74, 0x47, 0xfb, 0x7a, 0x84, 0xe1, 0xf8, 0x46, 0x33, 0x74, 0x16,
+ 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x8b, 0xd4, 0x34, 0x2d, 0x28, 0x33, 0x54, 0xbf,
+ 0xbc, 0x52, 0x9d, 0x4b, 0x63, 0xe0, 0x9c, 0x9a, 0x68, 0x19, 0x50, 0x1a, 0x22, 0x66, 0x98, 0x3d,
+ 0x71, 0xd3, 0xf4, 0x70, 0x46, 0x0d, 0xf4, 0x32, 0x8c, 0x29, 0xbb, 0x7d, 0x6d, 0xc6, 0xd9, 0x05,
+ 0x81, 0x75, 0x00, 0x36, 0xf1, 0xd0, 0x45, 0x9e, 0x82, 0x49, 0x24, 0xeb, 0x12, 0x2a, 0xd3, 0x09,
+ 0x99, 0x5f, 0x49, 0x14, 0x63, 0x1d, 0x67, 0xfe, 0xd2, 0x5f, 0x7c, 0xeb, 0xcc, 0x87, 0xfe, 0xea,
+ 0x5b, 0x67, 0x3e, 0xf4, 0xb7, 0xdf, 0x3a, 0xf3, 0xa1, 0x1f, 0xda, 0x3b, 0x63, 0xfd, 0xc5, 0xde,
+ 0x19, 0xeb, 0xaf, 0xf6, 0xce, 0x58, 0x7f, 0xbb, 0x77, 0xc6, 0xfa, 0xdf, 0xf7, 0xce, 0x58, 0x5f,
+ 0xfe, 0x3f, 0xce, 0x7c, 0xe8, 0x6d, 0x14, 0x47, 0xa8, 0xbe, 0x40, 0x67, 0xe7, 0xc2, 0xf6, 0xc5,
+ 0xff, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf5, 0xf1, 0x8c, 0x4c, 0x2d, 0x26, 0x01, 0x00,
}
func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
@@ -9887,6 +9921,13 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.StopSignal != nil {
+ i -= len(*m.StopSignal)
+ copy(dAtA[i:], *m.StopSignal)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StopSignal)))
+ i--
+ dAtA[i] = 0x7a
+ }
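The bare `dAtA[i] = 0x7a` above (and the `0x1a` in the Lifecycle hunk just below) is a precomputed protobuf field key: `(field_number << 3) | wire_type`, with wire type 2 meaning length-delimited (strings, bytes, nested messages). A minimal sketch of the arithmetic; none of these identifiers are part of the vendored API:

```go
package main

import "fmt"

func main() {
	// Field key = (field_number << 3) | wire_type.
	// Wire type 2 = length-delimited (strings, bytes, nested messages).
	fmt.Printf("0x%x\n", 15<<3|2) // ContainerStatus.stopSignal = 15 -> 0x7a
	fmt.Printf("0x%x\n", 3<<3|2)  // Lifecycle.stopSignal = 3        -> 0x1a
}
```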
if len(m.AllocatedResourcesStatus) > 0 {
for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -12258,6 +12299,13 @@ func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.StopSignal != nil {
+ i -= len(*m.StopSignal)
+ copy(dAtA[i:], *m.StopSignal)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StopSignal)))
+ i--
+ dAtA[i] = 0x1a
+ }
if m.PreStop != nil {
{
size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i])
@@ -14135,6 +14183,34 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *NodeSwapStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NodeSwapStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeSwapStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Capacity != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Capacity))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
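`NodeSwapStatus.MarshalToSizedBuffer` above follows the gogo convention of filling the buffer back to front: the Capacity varint is written first, then the key byte `0x8` (field 1, wire type 0) in front of it. Writing backwards is what lets nested-message length prefixes be emitted after their payload without a second sizing pass. A sketch of what `encodeVarintGenerated` does, under the assumption it matches standard gogo output:

```go
package main

import (
	"fmt"
	"math/bits"
)

// encodeVarint writes v as a varint ending just before offset and
// returns the new start offset, mirroring encodeVarintGenerated.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= (bits.Len64(v|1) + 6) / 7 // varint width of v
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 8)
	i := encodeVarint(buf, len(buf), 300)
	fmt.Printf("% x\n", buf[i:]) // ac 02
}
```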
func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -14155,6 +14231,18 @@ func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Swap != nil {
+ {
+ size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
i -= len(m.Architecture)
copy(dAtA[i:], m.Architecture)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture)))
@@ -15723,6 +15811,9 @@ func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x38
i -= len(m.Message)
copy(dAtA[i:], m.Message)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
@@ -16994,6 +17085,11 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x88
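Note the contrast with the `PodCondition` hunk above, where `observedGeneration` is field 7 and the key fits in one byte (`(7<<3)|0 = 0x38`). Here in `PodStatus` it is field 17, so the key `(17<<3)|0 = 136` no longer fits in seven bits and is itself varint-encoded as `0x88 0x01`; because the buffer is filled back to front, the generated code emits `0x01` first and `0x88` second. A quick check using only the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	fmt.Printf("0x%x\n", 7<<3|0) // PodCondition.observedGeneration -> 0x38

	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], uint64(17<<3|0)) // PodStatus field 17
	fmt.Printf("% x\n", buf[:n])                    // 88 01
}
```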
if len(m.HostIPs) > 0 {
for iNdEx := len(m.HostIPs) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -22542,6 +22638,10 @@ func (m *ContainerStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.StopSignal != nil {
+ l = len(*m.StopSignal)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -23382,6 +23482,10 @@ func (m *Lifecycle) Size() (n int) {
l = m.PreStop.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.StopSignal != nil {
+ l = len(*m.StopSignal)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -24067,6 +24171,18 @@ func (m *NodeStatus) Size() (n int) {
return n
}
+func (m *NodeSwapStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Capacity != nil {
+ n += 1 + sovGenerated(uint64(*m.Capacity))
+ }
+ return n
+}
+
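The `n += 1 + sovGenerated(...)` pattern in these `Size()` methods is one key byte plus the varint width of the payload. A sketch of the width helper, assuming it matches the usual gogo definition:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov is the varint byte width of x; the x|1 keeps the zero value at
// width one, matching sovGenerated.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	fmt.Println(1 + sov(0))      // 2: a zero capacity still costs key+byte
	fmt.Println(1 + sov(64<<30)) // 7: a 64 GiB swap capacity
}
```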
func (m *NodeSystemInfo) Size() (n int) {
if m == nil {
return 0
@@ -24093,6 +24209,10 @@ func (m *NodeSystemInfo) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Architecture)
n += 1 + l + sovGenerated(uint64(l))
+ if m.Swap != nil {
+ l = m.Swap.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -24650,6 +24770,7 @@ func (m *PodCondition) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Message)
n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
return n
}
@@ -25174,6 +25295,7 @@ func (m *PodStatus) Size() (n int) {
n += 2 + l + sovGenerated(uint64(l))
}
}
+ n += 2 + sovGenerated(uint64(m.ObservedGeneration))
return n
}
@@ -27457,6 +27579,7 @@ func (this *ContainerStatus) String() string {
`VolumeMounts:` + repeatedStringForVolumeMounts + `,`,
`User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`,
`AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`,
+ `StopSignal:` + valueToStringGenerated(this.StopSignal) + `,`,
`}`,
}, "")
return s
@@ -28080,6 +28203,7 @@ func (this *Lifecycle) String() string {
s := strings.Join([]string{`&Lifecycle{`,
`PostStart:` + strings.Replace(this.PostStart.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`,
`PreStop:` + strings.Replace(this.PreStop.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`,
+ `StopSignal:` + valueToStringGenerated(this.StopSignal) + `,`,
`}`,
}, "")
return s
@@ -28658,6 +28782,16 @@ func (this *NodeStatus) String() string {
}, "")
return s
}
+func (this *NodeSwapStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NodeSwapStatus{`,
+ `Capacity:` + valueToStringGenerated(this.Capacity) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *NodeSystemInfo) String() string {
if this == nil {
return "nil"
@@ -28673,6 +28807,7 @@ func (this *NodeSystemInfo) String() string {
`KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`,
`OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`,
`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
+ `Swap:` + strings.Replace(this.Swap.String(), "NodeSwapStatus", "NodeSwapStatus", 1) + `,`,
`}`,
}, "")
return s
@@ -29045,6 +29180,7 @@ func (this *PodCondition) String() string {
`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
`}`,
}, "")
return s
@@ -29427,6 +29563,7 @@ func (this *PodStatus) String() string {
`Resize:` + fmt.Sprintf("%v", this.Resize) + `,`,
`ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`,
`HostIPs:` + repeatedStringForHostIPs + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
`}`,
}, "")
return s
@@ -37794,88 +37931,122 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Resources == nil {
- m.Resources = &ResourceRequirements{}
- }
- if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.VolumeMounts = append(m.VolumeMounts, VolumeMountStatus{})
- if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 13:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.User == nil {
- m.User = &ContainerUser{}
- }
- if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.Resources == nil {
+ m.Resources = &ResourceRequirements{}
+ }
+ if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumeMounts = append(m.VolumeMounts, VolumeMountStatus{})
+ if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.User == nil {
+ m.User = &ContainerUser{}
+ }
+ if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{})
+ if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 14:
+ case 15:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -37885,25 +38056,24 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{})
- if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := Signal(dAtA[iNdEx:postIndex])
+ m.StopSignal = &s
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -45056,6 +45226,39 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := Signal(dAtA[iNdEx:postIndex])
+ m.StopSignal = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -50743,6 +50946,76 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *NodeSwapStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NodeSwapStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NodeSwapStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Capacity = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
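Every scalar field in these `Unmarshal` methods repeats the same inlined varint decode loop, as in the `Capacity` case above. Condensed into a standalone helper (a sketch; the generated code keeps it inline for speed):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// readVarint accumulates 7 bits per byte until the continuation bit
// (0x80) clears, rejecting input that would shift past 64 bits or run
// off the buffer, exactly as the inlined loops do.
func readVarint(dAtA []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// 0x08 is the NodeSwapStatus.capacity key; the rest encodes 1<<21.
	v, next, err := readVarint([]byte{0x08, 0x80, 0x80, 0x80, 0x01}, 1)
	fmt.Println(v, next, err) // 2097152 5 <nil>
}
```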
func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -51092,6 +51365,42 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
}
m.Architecture = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Swap == nil {
+ m.Swap = &NodeSwapStatus{}
+ }
+ if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -56087,6 +56396,25 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error {
}
m.Message = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -60340,6 +60668,25 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index 08706987c..9b48fb1c3 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -1103,6 +1103,11 @@ message ContainerStatus {
// +listType=map
// +listMapKey=name
repeated ResourceStatus allocatedResourcesStatus = 14;
+
+ // StopSignal reports the effective stop signal for this container
+ // +featureGate=ContainerStopSignals
+ // +optional
+ optional string stopSignal = 15;
}
// ContainerUser represents user identity information
@@ -1194,6 +1199,7 @@ message EmptyDirVolumeSource {
}
// EndpointAddress is a tuple that describes single IP address.
+// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
message EndpointAddress {
// The IP of this endpoint.
@@ -1215,6 +1221,7 @@ message EndpointAddress {
}
// EndpointPort is a tuple that describes a single port.
+// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
message EndpointPort {
// The name of this port. This must match the 'name' field in the
@@ -1265,6 +1272,8 @@ message EndpointPort {
//
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+//
+// Deprecated: This API is deprecated in v1.33+.
message EndpointSubset {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
@@ -1298,6 +1307,11 @@ message EndpointSubset {
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
+//
+// Endpoints is a legacy API and does not contain information about all Service features.
+// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
+//
+// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
message Endpoints {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
@@ -1317,6 +1331,7 @@ message Endpoints {
}
// EndpointsList is a list of endpoints.
+// Deprecated: This API is deprecated in v1.33+.
message EndpointsList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
@@ -1327,9 +1342,9 @@ message EndpointsList {
repeated Endpoints items = 2;
}
-// EnvFromSource represents the source of a set of ConfigMaps
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets
message EnvFromSource {
- // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
// +optional
optional string prefix = 1;
@@ -2198,6 +2213,12 @@ message Lifecycle {
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
optional LifecycleHandler preStop = 2;
+
+ // StopSignal defines which signal will be sent to a container when it is being stopped.
+ // If not specified, the default is defined by the container runtime in use.
+ // StopSignal can only be set for Pods with a non-empty .spec.os.name
+ // +optional
+ optional string stopSignal = 3;
}
// LifecycleHandler defines a specific action that should be taken in a lifecycle
@@ -2862,6 +2883,13 @@ message NodeStatus {
optional NodeFeatures features = 13;
}
+// NodeSwapStatus represents swap memory information.
+message NodeSwapStatus {
+ // Total amount of swap memory in bytes.
+ // +optional
+ optional int64 capacity = 1;
+}
+
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
message NodeSystemInfo {
// MachineID reported by the node. For unique machine identification
@@ -2897,6 +2925,9 @@ message NodeSystemInfo {
// The Architecture reported by the node
optional string architecture = 10;
+
+ // Swap Info reported by the node.
+ optional NodeSwapStatus swap = 11;
}
// ObjectFieldSelector selects an APIVersioned field of an object.
@@ -3615,7 +3646,6 @@ message PodAffinityTerm {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
- // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@@ -3629,7 +3659,6 @@ message PodAffinityTerm {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@@ -3702,6 +3731,12 @@ message PodCondition {
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
optional string type = 1;
+ // If set, this represents the .metadata.generation that the pod condition was set based upon.
+ // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // +featureGate=PodObservedGenerationTracking
+ // +optional
+ optional int64 observedGeneration = 7;
+
// Status is the status of the condition.
// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
@@ -4138,7 +4173,7 @@ message PodSpec {
// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max of
- // of that value or the sum of the normal containers. Limits are applied to init containers
+ // that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Cannot be updated.
@@ -4487,6 +4522,12 @@ message PodSpec {
// state of a system, especially if the node that hosts the pod cannot contact the control
// plane.
message PodStatus {
+ // If set, this represents the .metadata.generation that the pod status was set based upon.
+ // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // +featureGate=PodObservedGenerationTracking
+ // +optional
+ optional int64 observedGeneration = 17;
+
// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
// The conditions array, the reason and message fields, and the individual container status
// arrays contain more detail about the pod's status.
@@ -4618,6 +4659,9 @@ message PodStatus {
// Status of resources resize desired for pod's containers.
// It is empty if no resources resize is pending.
// Any changes to container resources will automatically set this to "Proposed"
+ // Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
+ // PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
+ // PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
// +featureGate=InPlacePodVerticalScaling
// +optional
optional string resize = 14;
@@ -5063,12 +5107,18 @@ message ReplicationControllerSpec {
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
// +optional
+ // +k8s:optional
+ // +default=1
+ // +k8s:minimum=0
optional int32 replicas = 1;
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
optional int32 minReadySeconds = 4;
// Selector is a label query over pods that should match the Replicas count.
@@ -6110,13 +6160,12 @@ message ServiceSpec {
// +optional
optional string internalTrafficPolicy = 22;
- // TrafficDistribution offers a way to express preferences for how traffic is
- // distributed to Service endpoints. Implementations can use this field as a
- // hint, but are not required to guarantee strict adherence. If the field is
- // not set, the implementation will apply its default routing strategy. If set
- // to "PreferClose", implementations should prioritize endpoints that are
- // topologically close (e.g., same zone).
- // This is a beta field and requires enabling ServiceTrafficDistribution feature.
+ // TrafficDistribution offers a way to express preferences for how traffic
+ // is distributed to Service endpoints. Implementations can use this field
+ // as a hint, but are not required to guarantee strict adherence. If the
+ // field is not set, the implementation will apply its default routing
+ // strategy. If set to "PreferClose", implementations should prioritize
+ // endpoints that are in the same zone.
// +featureGate=ServiceTrafficDistribution
// +optional
optional string trafficDistribution = 23;
@@ -6411,7 +6460,6 @@ message TopologySpreadConstraint {
// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
//
// If this value is nil, the behavior is equivalent to the Honor policy.
- // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
optional string nodeAffinityPolicy = 6;
@@ -6422,7 +6470,6 @@ message TopologySpreadConstraint {
// - Ignore: node taints are ignored. All nodes are included.
//
// If this value is nil, the behavior is equivalent to the Ignore policy.
- // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
optional string nodeTaintsPolicy = 7;
@@ -6854,7 +6901,7 @@ message VolumeSource {
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
// The volume will be mounted read-only (ro) and non-executable files (noexec).
- // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
// +featureGate=ImageVolume
// +optional
diff --git a/vendor/k8s.io/api/core/v1/lifecycle.go b/vendor/k8s.io/api/core/v1/lifecycle.go
index 21ca90e81..21b931b67 100644
--- a/vendor/k8s.io/api/core/v1/lifecycle.go
+++ b/vendor/k8s.io/api/core/v1/lifecycle.go
@@ -16,6 +16,10 @@ limitations under the License.
package v1
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
// APILifecycleIntroduced returns the release in which the API struct was introduced as int versions of major and minor for comparison.
func (in *ComponentStatus) APILifecycleIntroduced() (major, minor int) {
return 1, 0
@@ -35,3 +39,23 @@ func (in *ComponentStatusList) APILifecycleIntroduced() (major, minor int) {
func (in *ComponentStatusList) APILifecycleDeprecated() (major, minor int) {
return 1, 19
}
+
+// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+func (in *Endpoints) APILifecycleDeprecated() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleReplacement returns the GVK of the replacement for the given API
+func (in *Endpoints) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSlice"}
+}
+
+// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+func (in *EndpointsList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleReplacement returns the GVK of the replacement for the given API
+func (in *EndpointsList) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSliceList"}
+}
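These `APILifecycleDeprecated`/`APILifecycleReplacement` methods feed the prerelease-lifecycle machinery the apiserver uses to emit deprecation warnings for `Endpoints` reads and writes. They are plain methods and can be called directly; a sketch, assuming the vendored module is importable as `k8s.io/api/core/v1`:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	e := &v1.Endpoints{}
	major, minor := e.APILifecycleDeprecated()
	gvk := e.APILifecycleReplacement()
	fmt.Printf("Endpoints deprecated since v%d.%d; replacement: %s\n",
		major, minor, gvk.String())
	// Output: Endpoints deprecated since v1.33; replacement:
	// discovery.k8s.io/v1, Kind=EndpointSlice
}
```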
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index fb2c1c745..f7641e485 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -217,7 +217,7 @@ type VolumeSource struct {
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
// The volume will be mounted read-only (ro) and non-executable files (noexec).
- // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
// +featureGate=ImageVolume
// +optional
@@ -2437,9 +2437,9 @@ type SecretKeySelector struct {
Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
-// EnvFromSource represents the source of a set of ConfigMaps
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets
type EnvFromSource struct {
- // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
// +optional
Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
// The ConfigMap to select from
@@ -2980,6 +2980,78 @@ type LifecycleHandler struct {
Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
}
+// Signal defines the stop signal of containers
+// +enum
+type Signal string
+
+const (
+ SIGABRT Signal = "SIGABRT"
+ SIGALRM Signal = "SIGALRM"
+ SIGBUS Signal = "SIGBUS"
+ SIGCHLD Signal = "SIGCHLD"
+ SIGCLD Signal = "SIGCLD"
+ SIGCONT Signal = "SIGCONT"
+ SIGFPE Signal = "SIGFPE"
+ SIGHUP Signal = "SIGHUP"
+ SIGILL Signal = "SIGILL"
+ SIGINT Signal = "SIGINT"
+ SIGIO Signal = "SIGIO"
+ SIGIOT Signal = "SIGIOT"
+ SIGKILL Signal = "SIGKILL"
+ SIGPIPE Signal = "SIGPIPE"
+ SIGPOLL Signal = "SIGPOLL"
+ SIGPROF Signal = "SIGPROF"
+ SIGPWR Signal = "SIGPWR"
+ SIGQUIT Signal = "SIGQUIT"
+ SIGSEGV Signal = "SIGSEGV"
+ SIGSTKFLT Signal = "SIGSTKFLT"
+ SIGSTOP Signal = "SIGSTOP"
+ SIGSYS Signal = "SIGSYS"
+ SIGTERM Signal = "SIGTERM"
+ SIGTRAP Signal = "SIGTRAP"
+ SIGTSTP Signal = "SIGTSTP"
+ SIGTTIN Signal = "SIGTTIN"
+ SIGTTOU Signal = "SIGTTOU"
+ SIGURG Signal = "SIGURG"
+ SIGUSR1 Signal = "SIGUSR1"
+ SIGUSR2 Signal = "SIGUSR2"
+ SIGVTALRM Signal = "SIGVTALRM"
+ SIGWINCH Signal = "SIGWINCH"
+ SIGXCPU Signal = "SIGXCPU"
+ SIGXFSZ Signal = "SIGXFSZ"
+ SIGRTMIN Signal = "SIGRTMIN"
+ SIGRTMINPLUS1 Signal = "SIGRTMIN+1"
+ SIGRTMINPLUS2 Signal = "SIGRTMIN+2"
+ SIGRTMINPLUS3 Signal = "SIGRTMIN+3"
+ SIGRTMINPLUS4 Signal = "SIGRTMIN+4"
+ SIGRTMINPLUS5 Signal = "SIGRTMIN+5"
+ SIGRTMINPLUS6 Signal = "SIGRTMIN+6"
+ SIGRTMINPLUS7 Signal = "SIGRTMIN+7"
+ SIGRTMINPLUS8 Signal = "SIGRTMIN+8"
+ SIGRTMINPLUS9 Signal = "SIGRTMIN+9"
+ SIGRTMINPLUS10 Signal = "SIGRTMIN+10"
+ SIGRTMINPLUS11 Signal = "SIGRTMIN+11"
+ SIGRTMINPLUS12 Signal = "SIGRTMIN+12"
+ SIGRTMINPLUS13 Signal = "SIGRTMIN+13"
+ SIGRTMINPLUS14 Signal = "SIGRTMIN+14"
+ SIGRTMINPLUS15 Signal = "SIGRTMIN+15"
+ SIGRTMAXMINUS14 Signal = "SIGRTMAX-14"
+ SIGRTMAXMINUS13 Signal = "SIGRTMAX-13"
+ SIGRTMAXMINUS12 Signal = "SIGRTMAX-12"
+ SIGRTMAXMINUS11 Signal = "SIGRTMAX-11"
+ SIGRTMAXMINUS10 Signal = "SIGRTMAX-10"
+ SIGRTMAXMINUS9 Signal = "SIGRTMAX-9"
+ SIGRTMAXMINUS8 Signal = "SIGRTMAX-8"
+ SIGRTMAXMINUS7 Signal = "SIGRTMAX-7"
+ SIGRTMAXMINUS6 Signal = "SIGRTMAX-6"
+ SIGRTMAXMINUS5 Signal = "SIGRTMAX-5"
+ SIGRTMAXMINUS4 Signal = "SIGRTMAX-4"
+ SIGRTMAXMINUS3 Signal = "SIGRTMAX-3"
+ SIGRTMAXMINUS2 Signal = "SIGRTMAX-2"
+ SIGRTMAXMINUS1 Signal = "SIGRTMAX-1"
+ SIGRTMAX Signal = "SIGRTMAX"
+)
+
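With the `Signal` enum in place, the new `Lifecycle.StopSignal` field is set like any other optional pointer field. An illustrative sketch (the image name is a placeholder; per the field comments this requires the ContainerStopSignals feature gate and a non-empty `.spec.os.name`):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	sig := v1.SIGUSR1
	pod := v1.Pod{
		Spec: v1.PodSpec{
			OS: &v1.PodOS{Name: v1.Linux}, // stopSignal needs spec.os.name
			Containers: []v1.Container{{
				Name:      "app",
				Image:     "example.com/app:latest",
				Lifecycle: &v1.Lifecycle{StopSignal: &sig},
			}},
		},
	}
	fmt.Println(*pod.Spec.Containers[0].Lifecycle.StopSignal) // SIGUSR1
}
```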
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
@@ -3001,6 +3073,11 @@ type Lifecycle struct {
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
+ // StopSignal defines which signal will be sent to a container when it is being stopped.
+ // If not specified, the default is defined by the container runtime in use.
+ // StopSignal can only be set for Pods with a non-empty .spec.os.name
+ // +optional
+ StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,3,opt,name=stopSignal"`
}
type ConditionStatus string
@@ -3154,6 +3231,10 @@ type ContainerStatus struct {
// +listType=map
// +listMapKey=name
AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
+ // StopSignal reports the effective stop signal for this container
+ // +featureGate=ContainerStopSignals
+ // +optional
+ StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,15,opt,name=stopSignal"`
}
// ResourceStatus represents the status of a single resource allocated to a Pod.
@@ -3278,6 +3359,17 @@ const (
// PodReadyToStartContainers pod sandbox is successfully configured and
// the pod is ready to launch containers.
PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers"
+ // PodResizePending indicates that the pod has been resized, but kubelet has not
+ // yet allocated the resources. If both PodResizePending and PodResizeInProgress
+ // are set, it means that a new resize was requested in the middle of a previous
+ // pod resize that is still in progress.
+ PodResizePending PodConditionType = "PodResizePending"
+ // PodResizeInProgress indicates that a resize is in progress, and is present whenever
+ // the Kubelet has allocated resources for the resize, but has not yet actuated all of
+ // the required changes.
+ // If both PodResizePending and PodResizeInProgress are set, it means that a new resize was
+ // requested in the middle of a previous pod resize that is still in progress.
+ PodResizeInProgress PodConditionType = "PodResizeInProgress"
)
// These are reasons for a pod's transition to a condition.
@@ -3301,6 +3393,18 @@ const (
// PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the
// disruption was initiated by scheduler's preemption.
PodReasonPreemptionByScheduler = "PreemptionByScheduler"
+
+ // PodReasonDeferred reason in PodResizePending pod condition indicates the proposed resize is feasible in
+ // theory (it fits on this node) but is not possible right now.
+ PodReasonDeferred = "Deferred"
+
+ // PodReasonInfeasible reason in PodResizePending pod condition indicates the proposed resize is not
+ // feasible and is rejected; it may not be re-evaluated
+ PodReasonInfeasible = "Infeasible"
+
+ // PodReasonError reason in PodResizeInProgress pod condition indicates that an error occurred while
+ // actuating the resize.
+ PodReasonError = "Error"
)
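Consumers can read the new resize conditions and reasons off `PodStatus.Conditions` in the usual way. A sketch (the helper name is made up; the `ConditionTrue` check follows standard condition conventions):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// resizePending reports whether PodResizePending is set and returns its
// Reason, which per the constants above is "Deferred" or "Infeasible".
func resizePending(pod *v1.Pod) (string, bool) {
	for _, c := range pod.Status.Conditions {
		if c.Type == v1.PodResizePending && c.Status == v1.ConditionTrue {
			return c.Reason, true
		}
	}
	return "", false
}

func main() {
	pod := &v1.Pod{Status: v1.PodStatus{Conditions: []v1.PodCondition{{
		Type:   v1.PodResizePending,
		Status: v1.ConditionTrue,
		Reason: v1.PodReasonDeferred,
	}}}}
	reason, ok := resizePending(pod)
	fmt.Println(reason, ok) // Deferred true
}
```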
// PodCondition contains details for the current condition of this pod.
@@ -3308,6 +3412,11 @@ type PodCondition struct {
// Type is the type of the condition.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
+ // If set, this represents the .metadata.generation that the pod condition was set based upon.
+ // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // +featureGate=PodObservedGenerationTracking
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,7,opt,name=observedGeneration"`
// Status is the status of the condition.
// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
@@ -3326,12 +3435,10 @@ type PodCondition struct {
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
-// PodResizeStatus shows status of desired resize of a pod's containers.
+// Deprecated: PodResizeStatus shows status of desired resize of a pod's containers.
type PodResizeStatus string
const (
- // Pod resources resize has been requested and will be evaluated by node.
- PodResizeStatusProposed PodResizeStatus = "Proposed"
// Pod resources resize has been accepted by node and is being actuated.
PodResizeStatusInProgress PodResizeStatus = "InProgress"
// Node cannot resize the pod at this time and will keep retrying.
@@ -3627,7 +3734,6 @@ type PodAffinityTerm struct {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
- // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@@ -3640,7 +3746,6 @@ type PodAffinityTerm struct {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@@ -3792,7 +3897,7 @@ type PodSpec struct {
// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max of
- // of that value or the sum of the normal containers. Limits are applied to init containers
+ // that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Cannot be updated.
@@ -4301,7 +4406,6 @@ type TopologySpreadConstraint struct {
// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
//
// If this value is nil, the behavior is equivalent to the Honor policy.
- // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"`
// NodeTaintsPolicy indicates how we will treat node taints when calculating
@@ -4311,7 +4415,6 @@ type TopologySpreadConstraint struct {
// - Ignore: node taints are ignored. All nodes are included.
//
// If this value is nil, the behavior is equivalent to the Ignore policy.
- // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"`
// MatchLabelKeys is a set of pod label keys to select the pods over which
@@ -4841,6 +4944,11 @@ type EphemeralContainer struct {
// state of a system, especially if the node that hosts the pod cannot contact the control
// plane.
type PodStatus struct {
+ // If set, this represents the .metadata.generation that the pod status was set based upon.
+ // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // +featureGate=PodObservedGenerationTracking
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,17,opt,name=observedGeneration"`
// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
// The conditions array, the reason and message fields, and the individual container status
// arrays contain more detail about the pod's status.
@@ -4968,6 +5076,9 @@ type PodStatus struct {
// Status of resources resize desired for pod's containers.
// It is empty if no resources resize is pending.
// Any changes to container resources will automatically set this to "Proposed"
+ // Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
+ // PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
+ // PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
// +featureGate=InPlacePodVerticalScaling
// +optional
Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"`
@@ -5099,12 +5210,18 @@ type ReplicationControllerSpec struct {
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
// +optional
+ // +k8s:optional
+ // +default=1
+ // +k8s:minimum=0
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
+ // +k8s:optional
+ // +default=0
+ // +k8s:minimum=0
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
// Selector is a label query over pods that should match the Replicas count.
@@ -5334,14 +5451,27 @@ const (
// These are valid values for the TrafficDistribution field of a Service.
const (
- // Indicates a preference for routing traffic to endpoints that are
- // topologically proximate to the client. The interpretation of "topologically
- // proximate" may vary across implementations and could encompass endpoints
- // within the same node, rack, zone, or even region. Setting this value gives
- // implementations permission to make different tradeoffs, e.g. optimizing for
- // proximity rather than equal distribution of load. Users should not set this
- // value if such tradeoffs are not acceptable.
+ // Indicates a preference for routing traffic to endpoints that are in the same
+ // zone as the client. Users should not set this value unless they have ensured
+ // that clients and endpoints are distributed in such a way that the "same zone"
+ // preference will not result in endpoints getting overloaded.
ServiceTrafficDistributionPreferClose = "PreferClose"
+
+ // Indicates a preference for routing traffic to endpoints that are in the same
+ // zone as the client. Users should not set this value unless they have ensured
+ // that clients and endpoints are distributed in such a way that the "same zone"
+ // preference will not result in endpoints getting overloaded.
+ // This is an alias for "PreferClose", but it is an Alpha feature and is only
+ // recognized if the PreferSameTrafficDistribution feature gate is enabled.
+ ServiceTrafficDistributionPreferSameZone = "PreferSameZone"
+
+ // Indicates a preference for routing traffic to endpoints that are on the same
+ // node as the client. Users should not set this value unless they have ensured
+ // that clients and endpoints are distributed in such a way that the "same node"
+ // preference will not result in endpoints getting overloaded.
+ // This is an Alpha feature and is only recognized if the
+ // PreferSameTrafficDistribution feature gate is enabled.
+ ServiceTrafficDistributionPreferSameNode = "PreferSameNode"
)
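Since `ServiceSpec.TrafficDistribution` is a `*string`, the constants above are used by taking an address. A brief sketch with the stable value (the two `PreferSame*` aliases additionally need the PreferSameTrafficDistribution feature gate, per their comments):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	td := v1.ServiceTrafficDistributionPreferClose
	svc := v1.Service{Spec: v1.ServiceSpec{TrafficDistribution: &td}}
	fmt.Println(*svc.Spec.TrafficDistribution) // PreferClose
}
```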
// These are the valid conditions of a service.
@@ -5689,13 +5819,12 @@ type ServiceSpec struct {
// +optional
InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"`
- // TrafficDistribution offers a way to express preferences for how traffic is
- // distributed to Service endpoints. Implementations can use this field as a
- // hint, but are not required to guarantee strict adherence. If the field is
- // not set, the implementation will apply its default routing strategy. If set
- // to "PreferClose", implementations should prioritize endpoints that are
- // topologically close (e.g., same zone).
- // This is a beta field and requires enabling ServiceTrafficDistribution feature.
+ // TrafficDistribution offers a way to express preferences for how traffic
+ // is distributed to Service endpoints. Implementations can use this field
+ // as a hint, but are not required to guarantee strict adherence. If the
+ // field is not set, the implementation will apply its default routing
+ // strategy. If set to "PreferClose", implementations should prioritize
+ // endpoints that are in the same zone.
// +featureGate=ServiceTrafficDistribution
// +optional
TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
@@ -5888,6 +6017,11 @@ type ServiceAccountList struct {
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
+//
+// Endpoints is a legacy API and does not contain information about all Service features.
+// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
+//
+// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
type Endpoints struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@@ -5920,6 +6054,8 @@ type Endpoints struct {
//
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+//
+// Deprecated: This API is deprecated in v1.33+.
type EndpointSubset struct {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
@@ -5939,6 +6075,7 @@ type EndpointSubset struct {
}
// EndpointAddress is a tuple that describes single IP address.
+// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
type EndpointAddress struct {
// The IP of this endpoint.
@@ -5957,6 +6094,7 @@ type EndpointAddress struct {
}
// EndpointPort is a tuple that describes a single port.
+// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
type EndpointPort struct {
// The name of this port. This must match the 'name' field in the
@@ -5998,6 +6136,7 @@ type EndpointPort struct {
// +k8s:prerelease-lifecycle-gen:introduced=1.0
// EndpointsList is a list of endpoints.
+// Deprecated: This API is deprecated in v1.33+.
type EndpointsList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
@@ -6166,6 +6305,15 @@ type NodeSystemInfo struct {
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
+ // Swap Info reported by the node.
+ Swap *NodeSwapStatus `json:"swap,omitempty" protobuf:"bytes,11,opt,name=swap"`
+}
+
+// NodeSwapStatus represents swap memory information.
+type NodeSwapStatus struct {
+ // Total amount of swap memory in bytes.
+ // +optional
+ Capacity *int64 `json:"capacity,omitempty" protobuf:"varint,1,opt,name=capacity"`
}
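Both levels of the new swap reporting are optional pointers (`NodeSystemInfo.Swap` and `NodeSwapStatus.Capacity`), so readers must nil-check each. A sketch with a made-up helper name and an illustrative capacity:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// swapCapacityBytes returns the reported swap capacity, guarding both
// optional pointer levels.
func swapCapacityBytes(info v1.NodeSystemInfo) (int64, bool) {
	if info.Swap == nil || info.Swap.Capacity == nil {
		return 0, false
	}
	return *info.Swap.Capacity, true
}

func main() {
	capBytes := int64(4 << 30) // 4 GiB, illustrative only
	info := v1.NodeSystemInfo{Swap: &v1.NodeSwapStatus{Capacity: &capBytes}}
	if b, ok := swapCapacityBytes(info); ok {
		fmt.Printf("swap: %d bytes\n", b)
	}
}
```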
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
@@ -7267,6 +7415,9 @@ const (
ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
// Match all pod objects that have cross-namespace pod (anti)affinity mentioned.
ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
+
+ // Match all pvc objects that have volume attributes class mentioned.
+ ResourceQuotaScopeVolumeAttributesClass ResourceQuotaScope = "VolumeAttributesClass"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 89ce3d230..9e987eefd 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -474,6 +474,7 @@ var map_ContainerStatus = map[string]string{
"volumeMounts": "Status of volume mounts.",
"user": "User represents user identity information initially attached to the first process of the container",
"allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.",
+ "stopSignal": "StopSignal reports the effective stop signal for this container",
}
func (ContainerStatus) SwaggerDoc() map[string]string {
@@ -540,7 +541,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
}
var map_EndpointAddress = map[string]string{
- "": "EndpointAddress is a tuple that describes single IP address.",
+ "": "EndpointAddress is a tuple that describes single IP address. Deprecated: This API is deprecated in v1.33+.",
"ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).",
"hostname": "The Hostname of this endpoint",
"nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
@@ -552,7 +553,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string {
}
var map_EndpointPort = map[string]string{
- "": "EndpointPort is a tuple that describes a single port.",
+ "": "EndpointPort is a tuple that describes a single port. Deprecated: This API is deprecated in v1.33+.",
"name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
"port": "The port number of the endpoint.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
@@ -564,7 +565,7 @@ func (EndpointPort) SwaggerDoc() map[string]string {
}
var map_EndpointSubset = map[string]string{
- "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]",
+ "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]\n\nDeprecated: This API is deprecated in v1.33+.",
"addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
"notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
"ports": "Port numbers available on the related IP addresses.",
@@ -575,7 +576,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string {
}
var map_Endpoints = map[string]string{
- "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]",
+ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]\n\nEndpoints is a legacy API and does not contain information about all Service features. Use discoveryv1.EndpointSlice for complete information about Service endpoints.\n\nDeprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
}
@@ -585,7 +586,7 @@ func (Endpoints) SwaggerDoc() map[string]string {
}
var map_EndpointsList = map[string]string{
- "": "EndpointsList is a list of endpoints.",
+ "": "EndpointsList is a list of endpoints. Deprecated: This API is deprecated in v1.33+.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"items": "List of endpoints.",
}
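Since Endpoints and EndpointsList are marked deprecated above in favor of discovery/v1, a sketch of the replacement lookup path with client-go, assuming a configured clientset; the namespace and service name are placeholders:

import (
	"context"
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// serviceSlices lists every EndpointSlice backing a service by selecting
// on the well-known kubernetes.io/service-name label.
func serviceSlices(ctx context.Context, cs kubernetes.Interface, ns, svc string) error {
	slices, err := cs.DiscoveryV1().EndpointSlices(ns).List(ctx, metav1.ListOptions{
		LabelSelector: discoveryv1.LabelServiceName + "=" + svc,
	})
	if err != nil {
		return err
	}
	for _, s := range slices.Items {
		fmt.Println(s.Name, s.AddressType, len(s.Endpoints))
	}
	return nil
}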
@@ -595,8 +596,8 @@ func (EndpointsList) SwaggerDoc() map[string]string {
}
var map_EnvFromSource = map[string]string{
- "": "EnvFromSource represents the source of a set of ConfigMaps",
- "prefix": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
+ "": "EnvFromSource represents the source of a set of ConfigMaps or Secrets",
+ "prefix": "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.",
"configMapRef": "The ConfigMap to select from",
"secretRef": "The Secret to select from",
}
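The reworded prefix doc above is easiest to see in code. A small sketch (the prefix and ConfigMap name are illustrative): with Prefix "APP_", a ConfigMap key "port" surfaces in the container as the environment variable APP_port.

import corev1 "k8s.io/api/core/v1"

// envFromConfigMap exposes every key of the named ConfigMap as an
// environment variable, prepending "APP_" to each variable name.
func envFromConfigMap(name string) corev1.EnvFromSource {
	return corev1.EnvFromSource{
		Prefix: "APP_",
		ConfigMapRef: &corev1.ConfigMapEnvSource{
			LocalObjectReference: corev1.LocalObjectReference{Name: name},
		},
	}
}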
@@ -957,9 +958,10 @@ func (KeyToPath) SwaggerDoc() map[string]string {
}
var map_Lifecycle = map[string]string{
- "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
- "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
- "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
+ "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+ "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+ "stopSignal": "StopSignal defines which signal will be sent to a container when it is being stopped. If not specified, the default is defined by the container runtime in use. StopSignal can only be set for Pods with a non-empty .spec.os.name",
}
func (Lifecycle) SwaggerDoc() map[string]string {
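A sketch of the new stopSignal knob, assuming the core/v1 types from this change where Lifecycle gained a StopSignal *Signal field; SIGUSR1 is an arbitrary choice, and per the doc text above the enclosing pod must set a non-empty spec.os.name:

import corev1 "k8s.io/api/core/v1"

// withStopSignal asks the runtime to stop the container with SIGUSR1
// instead of the runtime default.
func withStopSignal(c corev1.Container) corev1.Container {
	sig := corev1.Signal("SIGUSR1")
	if c.Lifecycle == nil {
		c.Lifecycle = &corev1.Lifecycle{}
	}
	c.Lifecycle.StopSignal = &sig
	return c
}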
@@ -1335,6 +1337,15 @@ func (NodeStatus) SwaggerDoc() map[string]string {
return map_NodeStatus
}
+var map_NodeSwapStatus = map[string]string{
+ "": "NodeSwapStatus represents swap memory information.",
+ "capacity": "Total amount of swap memory in bytes.",
+}
+
+func (NodeSwapStatus) SwaggerDoc() map[string]string {
+ return map_NodeSwapStatus
+}
+
var map_NodeSystemInfo = map[string]string{
"": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
"machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
@@ -1347,6 +1358,7 @@ var map_NodeSystemInfo = map[string]string{
"kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.",
"operatingSystem": "The Operating System reported by the node",
"architecture": "The Architecture reported by the node",
+ "swap": "Swap Info reported by the node.",
}
func (NodeSystemInfo) SwaggerDoc() map[string]string {
@@ -1583,8 +1595,8 @@ var map_PodAffinityTerm = map[string]string{
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
- "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
- "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
+ "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set.",
+ "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set.",
}
func (PodAffinityTerm) SwaggerDoc() map[string]string {
@@ -1617,6 +1629,7 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
var map_PodCondition = map[string]string{
"": "PodCondition contains details for the current condition of this pod.",
"type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+ "observedGeneration": "If set, this represents the .metadata.generation that the pod condition was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
"status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"lastProbeTime": "Last time we probed the condition.",
"lastTransitionTime": "Last time the condition transitioned from one status to another.",
@@ -1799,7 +1812,7 @@ func (PodSignature) SwaggerDoc() map[string]string {
var map_PodSpec = map[string]string{
"": "PodSpec is a description of a pod.",
"volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
- "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
+ "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
"containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
"ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.",
"restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
@@ -1846,6 +1859,7 @@ func (PodSpec) SwaggerDoc() map[string]string {
var map_PodStatus = map[string]string{
"": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
+ "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
"phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
"conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"message": "A human readable message indicating details about why the pod is in this condition.",
@@ -1860,7 +1874,7 @@ var map_PodStatus = map[string]string{
"containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
- "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"",
+ "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
"resourceClaimStatuses": "Status of resource claims.",
}
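Since the resize field above now points callers at the PodResizePending and PodResizeInProgress conditions, a hedged sketch of reading them; the condition-type strings are taken from the doc text, and treating a True status as the active state is an assumption:

import corev1 "k8s.io/api/core/v1"

// resizeState reports whether an in-place pod resize is pending or in
// progress, from the two conditions that replace the resize field.
func resizeState(pod *corev1.Pod) (pending, inProgress bool) {
	for _, c := range pod.Status.Conditions {
		switch string(c.Type) {
		case "PodResizePending":
			pending = c.Status == corev1.ConditionTrue
		case "PodResizeInProgress":
			inProgress = c.Status == corev1.ConditionTrue
		}
	}
	return pending, inProgress
}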
@@ -2487,7 +2501,7 @@ var map_ServiceSpec = map[string]string{
"allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.",
"loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.",
"internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).",
- "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.",
+ "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are in the same zone.",
}
func (ServiceSpec) SwaggerDoc() map[string]string {
@@ -2619,8 +2633,8 @@ var map_TopologySpreadConstraint = map[string]string{
"whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
"labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
"minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ",
- "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
- "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+ "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy.",
+ "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.",
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).",
}
@@ -2760,7 +2774,7 @@ var map_VolumeSource = map[string]string{
"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.",
"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.",
"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
- "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
+ "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
}
func (VolumeSource) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index 3f669092e..619c52542 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -1055,6 +1055,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.StopSignal != nil {
+ in, out := &in.StopSignal, &out.StopSignal
+ *out = new(Signal)
+ **out = **in
+ }
return
}
@@ -2101,6 +2106,11 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
*out = new(LifecycleHandler)
(*in).DeepCopyInto(*out)
}
+ if in.StopSignal != nil {
+ in, out := &in.StopSignal, &out.StopSignal
+ *out = new(Signal)
+ **out = **in
+ }
return
}
@@ -3002,7 +3012,7 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
copy(*out, *in)
}
out.DaemonEndpoints = in.DaemonEndpoints
- out.NodeInfo = in.NodeInfo
+ in.NodeInfo.DeepCopyInto(&out.NodeInfo)
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make([]ContainerImage, len(*in))
@@ -3050,9 +3060,35 @@ func (in *NodeStatus) DeepCopy() *NodeStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSwapStatus) DeepCopyInto(out *NodeSwapStatus) {
+ *out = *in
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSwapStatus.
+func (in *NodeSwapStatus) DeepCopy() *NodeSwapStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeSwapStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
*out = *in
+ if in.Swap != nil {
+ in, out := &in.Swap, &out.Swap
+ *out = new(NodeSwapStatus)
+ (*in).DeepCopyInto(*out)
+ }
return
}
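The switch from a plain struct assignment to NodeInfo.DeepCopyInto matters because NodeSystemInfo now carries a pointer; a small sketch showing the copy no longer aliases the original (values are illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	capacity := int64(1 << 30)
	src := corev1.NodeStatus{
		NodeInfo: corev1.NodeSystemInfo{
			Swap: &corev1.NodeSwapStatus{Capacity: &capacity},
		},
	}
	dst := src.DeepCopy()
	*dst.NodeInfo.Swap.Capacity = 0          // mutate the copy...
	fmt.Println(*src.NodeInfo.Swap.Capacity) // ...the original still prints 1073741824
}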
diff --git a/vendor/k8s.io/api/discovery/v1/doc.go b/vendor/k8s.io/api/discovery/v1/doc.go
index 01913669f..43e30b7f4 100644
--- a/vendor/k8s.io/api/discovery/v1/doc.go
+++ b/vendor/k8s.io/api/discovery/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=discovery.k8s.io
-package v1 // import "k8s.io/api/discovery/v1"
+package v1
diff --git a/vendor/k8s.io/api/discovery/v1/generated.pb.go b/vendor/k8s.io/api/discovery/v1/generated.pb.go
index 5792481dc..443ff8f8f 100644
--- a/vendor/k8s.io/api/discovery/v1/generated.pb.go
+++ b/vendor/k8s.io/api/discovery/v1/generated.pb.go
@@ -214,10 +214,38 @@ func (m *EndpointSliceList) XXX_DiscardUnknown() {
var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
+func (m *ForNode) Reset() { *m = ForNode{} }
+func (*ForNode) ProtoMessage() {}
+func (*ForNode) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2237b452324cf77e, []int{6}
+}
+func (m *ForNode) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ForNode) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ForNode.Merge(m, src)
+}
+func (m *ForNode) XXX_Size() int {
+ return m.Size()
+}
+func (m *ForNode) XXX_DiscardUnknown() {
+ xxx_messageInfo_ForNode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ForNode proto.InternalMessageInfo
+
func (m *ForZone) Reset() { *m = ForZone{} }
func (*ForZone) ProtoMessage() {}
func (*ForZone) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{6}
+ return fileDescriptor_2237b452324cf77e, []int{7}
}
func (m *ForZone) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -250,6 +278,7 @@ func init() {
proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1.EndpointPort")
proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1.EndpointSlice")
proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1.EndpointSliceList")
+ proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1.ForNode")
proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1.ForZone")
}
@@ -258,62 +287,64 @@ func init() {
}
var fileDescriptor_2237b452324cf77e = []byte{
- // 877 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x6f, 0xdc, 0x44,
- 0x18, 0x5e, 0x67, 0x63, 0x62, 0x8f, 0x13, 0xd1, 0x8e, 0x90, 0x62, 0x2d, 0xc8, 0x5e, 0x8c, 0x0a,
- 0x2b, 0x45, 0x78, 0x49, 0x84, 0x50, 0x41, 0xe2, 0x10, 0xd3, 0xd0, 0xf2, 0x15, 0xa2, 0x69, 0x4e,
- 0x15, 0x52, 0x71, 0xec, 0x37, 0x5e, 0x93, 0xd8, 0x63, 0x79, 0x26, 0x2b, 0x2d, 0x27, 0x2e, 0x9c,
- 0xe1, 0x17, 0x71, 0x44, 0x39, 0xf6, 0x46, 0x4f, 0x16, 0x31, 0x7f, 0x81, 0x53, 0x4f, 0x68, 0xc6,
- 0x9f, 0x61, 0xb3, 0xda, 0xde, 0x3c, 0xcf, 0x3c, 0xcf, 0xfb, 0xf1, 0xcc, 0xcc, 0x6b, 0xf4, 0xc1,
- 0xc5, 0x43, 0xe6, 0xc6, 0x74, 0xea, 0x67, 0xf1, 0x34, 0x8c, 0x59, 0x40, 0xe7, 0x90, 0x2f, 0xa6,
- 0xf3, 0xfd, 0x69, 0x04, 0x29, 0xe4, 0x3e, 0x87, 0xd0, 0xcd, 0x72, 0xca, 0x29, 0xde, 0xad, 0x88,
- 0xae, 0x9f, 0xc5, 0x6e, 0x4b, 0x74, 0xe7, 0xfb, 0xa3, 0x0f, 0xa3, 0x98, 0xcf, 0xae, 0xce, 0xdc,
- 0x80, 0x26, 0xd3, 0x88, 0x46, 0x74, 0x2a, 0xf9, 0x67, 0x57, 0xe7, 0x72, 0x25, 0x17, 0xf2, 0xab,
- 0x8a, 0x33, 0x72, 0x7a, 0x09, 0x03, 0x9a, 0xc3, 0x1d, 0xb9, 0x46, 0x1f, 0x77, 0x9c, 0xc4, 0x0f,
- 0x66, 0x71, 0x2a, 0x6a, 0xca, 0x2e, 0x22, 0x01, 0xb0, 0x69, 0x02, 0xdc, 0xbf, 0x4b, 0x35, 0x5d,
- 0xa5, 0xca, 0xaf, 0x52, 0x1e, 0x27, 0xb0, 0x24, 0xf8, 0x64, 0x9d, 0x80, 0x05, 0x33, 0x48, 0xfc,
- 0xff, 0xeb, 0x9c, 0x7f, 0x37, 0x91, 0x76, 0x94, 0x86, 0x19, 0x8d, 0x53, 0x8e, 0xf7, 0x90, 0xee,
- 0x87, 0x61, 0x0e, 0x8c, 0x01, 0x33, 0x95, 0xf1, 0x70, 0xa2, 0x7b, 0x3b, 0x65, 0x61, 0xeb, 0x87,
- 0x0d, 0x48, 0xba, 0x7d, 0xfc, 0x1c, 0xa1, 0x80, 0xa6, 0x61, 0xcc, 0x63, 0x9a, 0x32, 0x73, 0x63,
- 0xac, 0x4c, 0x8c, 0x83, 0x3d, 0x77, 0x85, 0xb3, 0x6e, 0x93, 0xe3, 0x8b, 0x56, 0xe2, 0xe1, 0xeb,
- 0xc2, 0x1e, 0x94, 0x85, 0x8d, 0x3a, 0x8c, 0xf4, 0x42, 0xe2, 0x09, 0xd2, 0x66, 0x94, 0xf1, 0xd4,
- 0x4f, 0xc0, 0x1c, 0x8e, 0x95, 0x89, 0xee, 0x6d, 0x97, 0x85, 0xad, 0x3d, 0xa9, 0x31, 0xd2, 0xee,
- 0xe2, 0x13, 0xa4, 0x73, 0x3f, 0x8f, 0x80, 0x13, 0x38, 0x37, 0x37, 0x65, 0x25, 0xef, 0xf5, 0x2b,
- 0x11, 0x67, 0x23, 0x8a, 0xf8, 0xfe, 0xec, 0x27, 0x08, 0x04, 0x09, 0x72, 0x48, 0x03, 0xa8, 0x9a,
- 0x3b, 0x6d, 0x94, 0xa4, 0x0b, 0x82, 0x7f, 0x55, 0x10, 0x0e, 0x21, 0xcb, 0x21, 0x10, 0x5e, 0x9d,
- 0xd2, 0x8c, 0x5e, 0xd2, 0x68, 0x61, 0xaa, 0xe3, 0xe1, 0xc4, 0x38, 0xf8, 0x74, 0x6d, 0x97, 0xee,
- 0xa3, 0x25, 0xed, 0x51, 0xca, 0xf3, 0x85, 0x37, 0xaa, 0x7b, 0xc6, 0xcb, 0x04, 0x72, 0x47, 0x42,
- 0xe1, 0x41, 0x4a, 0x43, 0x38, 0x16, 0x1e, 0xbc, 0xd1, 0x79, 0x70, 0x5c, 0x63, 0xa4, 0xdd, 0xc5,
- 0xef, 0xa0, 0xcd, 0x9f, 0x69, 0x0a, 0xe6, 0x96, 0x64, 0x69, 0x65, 0x61, 0x6f, 0x3e, 0xa3, 0x29,
- 0x10, 0x89, 0xe2, 0xc7, 0x48, 0x9d, 0xc5, 0x29, 0x67, 0xa6, 0x26, 0xdd, 0x79, 0x7f, 0x6d, 0x07,
- 0x4f, 0x04, 0xdb, 0xd3, 0xcb, 0xc2, 0x56, 0xe5, 0x27, 0xa9, 0xf4, 0xa3, 0x23, 0xb4, 0xbb, 0xa2,
- 0x37, 0x7c, 0x0f, 0x0d, 0x2f, 0x60, 0x61, 0x2a, 0xa2, 0x00, 0x22, 0x3e, 0xf1, 0x5b, 0x48, 0x9d,
- 0xfb, 0x97, 0x57, 0x20, 0x6f, 0x87, 0x4e, 0xaa, 0xc5, 0x67, 0x1b, 0x0f, 0x15, 0xe7, 0x37, 0x05,
- 0xe1, 0xe5, 0x2b, 0x81, 0x6d, 0xa4, 0xe6, 0xe0, 0x87, 0x55, 0x10, 0xad, 0x4a, 0x4f, 0x04, 0x40,
- 0x2a, 0x1c, 0x3f, 0x40, 0x5b, 0x0c, 0xf2, 0x79, 0x9c, 0x46, 0x32, 0xa6, 0xe6, 0x19, 0x65, 0x61,
- 0x6f, 0x3d, 0xad, 0x20, 0xd2, 0xec, 0xe1, 0x7d, 0x64, 0x70, 0xc8, 0x93, 0x38, 0xf5, 0xb9, 0xa0,
- 0x0e, 0x25, 0xf5, 0xcd, 0xb2, 0xb0, 0x8d, 0xd3, 0x0e, 0x26, 0x7d, 0x8e, 0xf3, 0x1c, 0xed, 0xdc,
- 0xea, 0x1d, 0x1f, 0x23, 0xed, 0x9c, 0xe6, 0xc2, 0xc3, 0xea, 0x2d, 0x18, 0x07, 0xe3, 0x95, 0xae,
- 0x7d, 0x59, 0x11, 0xbd, 0x7b, 0xf5, 0xf1, 0x6a, 0x35, 0xc0, 0x48, 0x1b, 0xc3, 0xf9, 0x53, 0x41,
- 0xdb, 0x4d, 0x86, 0x13, 0x9a, 0x73, 0x71, 0x62, 0xf2, 0x6e, 0x2b, 0xdd, 0x89, 0xc9, 0x33, 0x95,
- 0x28, 0x7e, 0x8c, 0x34, 0xf9, 0x42, 0x03, 0x7a, 0x59, 0xd9, 0xe7, 0xed, 0x89, 0xc0, 0x27, 0x35,
- 0xf6, 0xaa, 0xb0, 0xdf, 0x5e, 0x9e, 0x3e, 0x6e, 0xb3, 0x4d, 0x5a, 0xb1, 0x48, 0x93, 0xd1, 0x9c,
- 0x4b, 0x13, 0xd4, 0x2a, 0x8d, 0x48, 0x4f, 0x24, 0x2a, 0x9c, 0xf2, 0xb3, 0xac, 0x91, 0xc9, 0xc7,
- 0xa3, 0x57, 0x4e, 0x1d, 0x76, 0x30, 0xe9, 0x73, 0x9c, 0xbf, 0x36, 0x3a, 0xab, 0x9e, 0x5e, 0xc6,
- 0x01, 0xe0, 0x1f, 0x91, 0x26, 0x06, 0x59, 0xe8, 0x73, 0x5f, 0x76, 0x63, 0x1c, 0x7c, 0xd4, 0xb3,
- 0xaa, 0x9d, 0x47, 0x6e, 0x76, 0x11, 0x09, 0x80, 0xb9, 0x82, 0xdd, 0x3d, 0xc8, 0xef, 0x80, 0xfb,
- 0xdd, 0x34, 0xe8, 0x30, 0xd2, 0x46, 0xc5, 0x8f, 0x90, 0x51, 0x4f, 0x9e, 0xd3, 0x45, 0x06, 0x75,
- 0x99, 0x4e, 0x2d, 0x31, 0x0e, 0xbb, 0xad, 0x57, 0xb7, 0x97, 0xa4, 0x2f, 0xc3, 0x04, 0xe9, 0x50,
- 0x17, 0x2e, 0x26, 0x96, 0x38, 0xd3, 0x77, 0xd7, 0xbe, 0x04, 0xef, 0x7e, 0x9d, 0x46, 0x6f, 0x10,
- 0x46, 0xba, 0x30, 0xf8, 0x6b, 0xa4, 0x0a, 0x23, 0x99, 0x39, 0x94, 0xf1, 0x1e, 0xac, 0x8d, 0x27,
- 0xcc, 0xf7, 0x76, 0xea, 0x98, 0xaa, 0x58, 0x31, 0x52, 0x85, 0x70, 0xfe, 0x50, 0xd0, 0xfd, 0x5b,
- 0xce, 0x7e, 0x1b, 0x33, 0x8e, 0x7f, 0x58, 0x72, 0xd7, 0x7d, 0x3d, 0x77, 0x85, 0x5a, 0x7a, 0xdb,
- 0x5e, 0xcb, 0x06, 0xe9, 0x39, 0xfb, 0x0d, 0x52, 0x63, 0x0e, 0x49, 0xe3, 0xc7, 0xfa, 0xc9, 0x20,
- 0x0b, 0xeb, 0x1a, 0xf8, 0x4a, 0x88, 0x49, 0x15, 0xc3, 0xd9, 0x43, 0x5b, 0xf5, 0xcd, 0xc7, 0xe3,
- 0x5b, 0xb7, 0x7b, 0xbb, 0xa6, 0xf7, 0x6e, 0xb8, 0xf7, 0xf9, 0xf5, 0x8d, 0x35, 0x78, 0x71, 0x63,
- 0x0d, 0x5e, 0xde, 0x58, 0x83, 0x5f, 0x4a, 0x4b, 0xb9, 0x2e, 0x2d, 0xe5, 0x45, 0x69, 0x29, 0x2f,
- 0x4b, 0x4b, 0xf9, 0xbb, 0xb4, 0x94, 0xdf, 0xff, 0xb1, 0x06, 0xcf, 0x76, 0x57, 0xfc, 0xd4, 0xff,
- 0x0b, 0x00, 0x00, 0xff, 0xff, 0x76, 0x4b, 0x26, 0xe3, 0xee, 0x07, 0x00, 0x00,
+ // 902 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
+ 0x14, 0x8e, 0x9b, 0x9a, 0xda, 0xe3, 0x56, 0xec, 0x8e, 0x90, 0x6a, 0x05, 0x64, 0x07, 0xa3, 0x85,
+ 0x48, 0x15, 0x0e, 0xad, 0x10, 0x5a, 0x90, 0x38, 0xd4, 0x6c, 0xd9, 0xe5, 0x57, 0xa9, 0x66, 0x7b,
+ 0x5a, 0x21, 0x81, 0x6b, 0xbf, 0x3a, 0xa6, 0x8d, 0xc7, 0xf2, 0x4c, 0x22, 0x85, 0x13, 0x17, 0xce,
+ 0xf0, 0x9f, 0xf0, 0x1f, 0x70, 0x44, 0x3d, 0xee, 0x8d, 0x3d, 0x59, 0xd4, 0xfc, 0x0b, 0x9c, 0xf6,
+ 0x84, 0x66, 0xfc, 0x33, 0xa4, 0x51, 0xf6, 0xe6, 0xf9, 0xe6, 0x7b, 0xdf, 0x7b, 0xf3, 0xcd, 0x7b,
+ 0x23, 0xa3, 0xf7, 0xae, 0x1e, 0x32, 0x37, 0xa6, 0x63, 0x3f, 0x8d, 0xc7, 0x61, 0xcc, 0x02, 0x3a,
+ 0x87, 0x6c, 0x31, 0x9e, 0x1f, 0x8e, 0x23, 0x48, 0x20, 0xf3, 0x39, 0x84, 0x6e, 0x9a, 0x51, 0x4e,
+ 0xf1, 0x7e, 0x49, 0x74, 0xfd, 0x34, 0x76, 0x1b, 0xa2, 0x3b, 0x3f, 0x1c, 0xbc, 0x1f, 0xc5, 0x7c,
+ 0x32, 0xbb, 0x70, 0x03, 0x3a, 0x1d, 0x47, 0x34, 0xa2, 0x63, 0xc9, 0xbf, 0x98, 0x5d, 0xca, 0x95,
+ 0x5c, 0xc8, 0xaf, 0x52, 0x67, 0xe0, 0x74, 0x12, 0x06, 0x34, 0x83, 0x3b, 0x72, 0x0d, 0x3e, 0x6c,
+ 0x39, 0x53, 0x3f, 0x98, 0xc4, 0x89, 0xa8, 0x29, 0xbd, 0x8a, 0x04, 0xc0, 0xc6, 0x53, 0xe0, 0xfe,
+ 0x5d, 0x51, 0xe3, 0x75, 0x51, 0xd9, 0x2c, 0xe1, 0xf1, 0x14, 0x56, 0x02, 0x3e, 0xda, 0x14, 0xc0,
+ 0x82, 0x09, 0x4c, 0xfd, 0xff, 0xc7, 0x39, 0xff, 0x6e, 0x23, 0xed, 0x24, 0x09, 0x53, 0x1a, 0x27,
+ 0x1c, 0x1f, 0x20, 0xdd, 0x0f, 0xc3, 0x0c, 0x18, 0x03, 0x66, 0x2a, 0xc3, 0xfe, 0x48, 0xf7, 0xf6,
+ 0x8a, 0xdc, 0xd6, 0x8f, 0x6b, 0x90, 0xb4, 0xfb, 0xf8, 0x7b, 0x84, 0x02, 0x9a, 0x84, 0x31, 0x8f,
+ 0x69, 0xc2, 0xcc, 0xad, 0xa1, 0x32, 0x32, 0x8e, 0x0e, 0xdc, 0x35, 0xce, 0xba, 0x75, 0x8e, 0xcf,
+ 0x9a, 0x10, 0x0f, 0xdf, 0xe4, 0x76, 0xaf, 0xc8, 0x6d, 0xd4, 0x62, 0xa4, 0x23, 0x89, 0x47, 0x48,
+ 0x9b, 0x50, 0xc6, 0x13, 0x7f, 0x0a, 0x66, 0x7f, 0xa8, 0x8c, 0x74, 0x6f, 0xb7, 0xc8, 0x6d, 0xed,
+ 0x49, 0x85, 0x91, 0x66, 0x17, 0x9f, 0x21, 0x9d, 0xfb, 0x59, 0x04, 0x9c, 0xc0, 0xa5, 0xb9, 0x2d,
+ 0x2b, 0x79, 0xa7, 0x5b, 0x89, 0xb8, 0x1b, 0x51, 0xc4, 0xb7, 0x17, 0x3f, 0x42, 0x20, 0x48, 0x90,
+ 0x41, 0x12, 0x40, 0x79, 0xb8, 0xf3, 0x3a, 0x92, 0xb4, 0x22, 0xf8, 0x17, 0x05, 0xe1, 0x10, 0xd2,
+ 0x0c, 0x02, 0xe1, 0xd5, 0x39, 0x4d, 0xe9, 0x35, 0x8d, 0x16, 0xa6, 0x3a, 0xec, 0x8f, 0x8c, 0xa3,
+ 0x8f, 0x37, 0x9e, 0xd2, 0x7d, 0xb4, 0x12, 0x7b, 0x92, 0xf0, 0x6c, 0xe1, 0x0d, 0xaa, 0x33, 0xe3,
+ 0x55, 0x02, 0xb9, 0x23, 0xa1, 0xf0, 0x20, 0xa1, 0x21, 0x9c, 0x0a, 0x0f, 0x5e, 0x6b, 0x3d, 0x38,
+ 0xad, 0x30, 0xd2, 0xec, 0xe2, 0xb7, 0xd0, 0xf6, 0x4f, 0x34, 0x01, 0x73, 0x47, 0xb2, 0xb4, 0x22,
+ 0xb7, 0xb7, 0x9f, 0xd1, 0x04, 0x88, 0x44, 0xf1, 0x63, 0xa4, 0x4e, 0xe2, 0x84, 0x33, 0x53, 0x93,
+ 0xee, 0xbc, 0xbb, 0xf1, 0x04, 0x4f, 0x04, 0xdb, 0xd3, 0x8b, 0xdc, 0x56, 0xe5, 0x27, 0x29, 0xe3,
+ 0x07, 0x27, 0x68, 0x7f, 0xcd, 0xd9, 0xf0, 0x3d, 0xd4, 0xbf, 0x82, 0x85, 0xa9, 0x88, 0x02, 0x88,
+ 0xf8, 0xc4, 0x6f, 0x20, 0x75, 0xee, 0x5f, 0xcf, 0x40, 0x76, 0x87, 0x4e, 0xca, 0xc5, 0x27, 0x5b,
+ 0x0f, 0x15, 0xe7, 0x57, 0x05, 0xe1, 0xd5, 0x96, 0xc0, 0x36, 0x52, 0x33, 0xf0, 0xc3, 0x52, 0x44,
+ 0x2b, 0xd3, 0x13, 0x01, 0x90, 0x12, 0xc7, 0x0f, 0xd0, 0x0e, 0x83, 0x6c, 0x1e, 0x27, 0x91, 0xd4,
+ 0xd4, 0x3c, 0xa3, 0xc8, 0xed, 0x9d, 0xa7, 0x25, 0x44, 0xea, 0x3d, 0x7c, 0x88, 0x0c, 0x0e, 0xd9,
+ 0x34, 0x4e, 0x7c, 0x2e, 0xa8, 0x7d, 0x49, 0x7d, 0xbd, 0xc8, 0x6d, 0xe3, 0xbc, 0x85, 0x49, 0x97,
+ 0xe3, 0xfc, 0xae, 0xa0, 0xbd, 0xa5, 0xc3, 0xe3, 0x53, 0xa4, 0x5d, 0xd2, 0x4c, 0x98, 0x58, 0x0e,
+ 0x83, 0x71, 0x34, 0x5c, 0x6b, 0xdb, 0xe7, 0x25, 0xd1, 0xbb, 0x57, 0xdd, 0xaf, 0x56, 0x01, 0x8c,
+ 0x34, 0x1a, 0x95, 0x9e, 0xb8, 0x3a, 0x31, 0x2e, 0x1b, 0xf5, 0x04, 0x71, 0x49, 0x4f, 0x46, 0x92,
+ 0x46, 0xc3, 0xf9, 0x53, 0x41, 0xbb, 0x75, 0xc5, 0x67, 0x34, 0xe3, 0xa2, 0x05, 0xe4, 0xb0, 0x28,
+ 0x6d, 0x0b, 0xc8, 0x26, 0x91, 0x28, 0x7e, 0x8c, 0x34, 0x39, 0xf2, 0x01, 0xbd, 0x2e, 0xef, 0xc3,
+ 0x3b, 0x10, 0xc2, 0x67, 0x15, 0xf6, 0x32, 0xb7, 0xdf, 0x5c, 0x7d, 0xce, 0xdc, 0x7a, 0x9b, 0x34,
+ 0xc1, 0x22, 0x4d, 0x4a, 0x33, 0x2e, 0x5d, 0x55, 0xcb, 0x34, 0x22, 0x3d, 0x91, 0xa8, 0xb0, 0xde,
+ 0x4f, 0xd3, 0x3a, 0x4c, 0x4e, 0xa3, 0x5e, 0x5a, 0x7f, 0xdc, 0xc2, 0xa4, 0xcb, 0x71, 0xfe, 0xda,
+ 0x6a, 0xad, 0x7f, 0x7a, 0x1d, 0x07, 0x80, 0x7f, 0x40, 0x9a, 0x78, 0x19, 0x43, 0x9f, 0xfb, 0xf2,
+ 0x34, 0xc6, 0xd1, 0x07, 0x1d, 0xab, 0x9a, 0x07, 0xce, 0x4d, 0xaf, 0x22, 0x01, 0x30, 0x57, 0xb0,
+ 0xdb, 0x09, 0xff, 0x06, 0xb8, 0xdf, 0x3e, 0x2f, 0x2d, 0x46, 0x1a, 0x55, 0xfc, 0x08, 0x19, 0xd5,
+ 0x53, 0x76, 0xbe, 0x48, 0xa1, 0x2a, 0xd3, 0xa9, 0x42, 0x8c, 0xe3, 0x76, 0xeb, 0xe5, 0xf2, 0x92,
+ 0x74, 0xc3, 0x30, 0x41, 0x3a, 0x54, 0x85, 0xd7, 0x77, 0xfa, 0xf6, 0xc6, 0xd1, 0xf2, 0xee, 0x57,
+ 0x69, 0xf4, 0x1a, 0x61, 0xa4, 0x95, 0xc1, 0x5f, 0x22, 0x55, 0x18, 0xc9, 0xcc, 0xbe, 0xd4, 0x7b,
+ 0xb0, 0x51, 0x4f, 0x98, 0xef, 0xed, 0x55, 0x9a, 0xaa, 0x58, 0x31, 0x52, 0x4a, 0x38, 0x7f, 0x28,
+ 0xe8, 0xfe, 0x92, 0xb3, 0x5f, 0xc7, 0x8c, 0xe3, 0xef, 0x56, 0xdc, 0x75, 0x5f, 0xcd, 0x5d, 0x11,
+ 0x2d, 0xbd, 0x6d, 0xda, 0xb2, 0x46, 0x3a, 0xce, 0x7e, 0x85, 0xd4, 0x98, 0xc3, 0xb4, 0xf6, 0x63,
+ 0xf3, 0x53, 0x23, 0x0b, 0x6b, 0x0f, 0xf0, 0x85, 0x08, 0x26, 0xa5, 0x86, 0x73, 0x80, 0x76, 0xaa,
+ 0xce, 0xc7, 0xc3, 0xa5, 0xee, 0xde, 0xad, 0xe8, 0x9d, 0x0e, 0xaf, 0xc8, 0x62, 0xd8, 0x36, 0x93,
+ 0xbd, 0x4f, 0x6f, 0x6e, 0xad, 0xde, 0xf3, 0x5b, 0xab, 0xf7, 0xe2, 0xd6, 0xea, 0xfd, 0x5c, 0x58,
+ 0xca, 0x4d, 0x61, 0x29, 0xcf, 0x0b, 0x4b, 0x79, 0x51, 0x58, 0xca, 0xdf, 0x85, 0xa5, 0xfc, 0xf6,
+ 0x8f, 0xd5, 0x7b, 0xb6, 0xbf, 0xe6, 0x97, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xfc,
+ 0xbe, 0xad, 0x6c, 0x08, 0x00, 0x00,
}
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
@@ -500,6 +531,20 @@ func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if len(m.ForNodes) > 0 {
+ for iNdEx := len(m.ForNodes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ForNodes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
if len(m.ForZones) > 0 {
for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -679,6 +724,34 @@ func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *ForNode) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ForNode) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ForNode) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *ForZone) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
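The hard-coded 0x12 and 0xa bytes in the marshal code above are protobuf field keys, computed as (field_number << 3) | wire_type, where wire type 2 means length-delimited. A self-contained check:

package main

import "fmt"

// key computes a protobuf field key byte: (field_number << 3) | wire_type.
func key(field, wire uint) byte { return byte(field<<3 | wire) }

func main() {
	fmt.Printf("%#x\n", key(2, 2)) // 0x12: EndpointHints.forNodes, field 2
	fmt.Printf("%#x\n", key(1, 2)) // 0xa:  ForNode.name, field 1
}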
@@ -793,6 +866,12 @@ func (m *EndpointHints) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if len(m.ForNodes) > 0 {
+ for _, e := range m.ForNodes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -862,6 +941,17 @@ func (m *EndpointSliceList) Size() (n int) {
return n
}
+func (m *ForNode) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *ForZone) Size() (n int) {
if m == nil {
return 0
@@ -927,8 +1017,14 @@ func (this *EndpointHints) String() string {
repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + ","
}
repeatedStringForForZones += "}"
+ repeatedStringForForNodes := "[]ForNode{"
+ for _, f := range this.ForNodes {
+ repeatedStringForForNodes += strings.Replace(strings.Replace(f.String(), "ForNode", "ForNode", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForForNodes += "}"
s := strings.Join([]string{`&EndpointHints{`,
`ForZones:` + repeatedStringForForZones + `,`,
+ `ForNodes:` + repeatedStringForForNodes + `,`,
`}`,
}, "")
return s
@@ -985,6 +1081,16 @@ func (this *EndpointSliceList) String() string {
}, "")
return s
}
+func (this *ForNode) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ForNode{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *ForZone) String() string {
if this == nil {
return "nil"
@@ -1592,6 +1698,40 @@ func (m *EndpointHints) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ForNodes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ForNodes = append(m.ForNodes, ForNode{})
+ if err := m.ForNodes[len(m.ForNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -2082,6 +2222,88 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *ForNode) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ForNode: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ForNode: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ForZone) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
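The shift-by-7 loops in the generated Unmarshal code above decode base-128 varints: 7 payload bits per byte, with the high bit set while more bytes follow (encoding/binary.Uvarint is the stdlib equivalent). A standalone sketch of the same decoding:

package main

import "fmt"

// uvarint decodes a protobuf base-128 varint, mirroring the generated loops.
func uvarint(b []byte) (v uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0 // truncated input
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n
		}
	}
	return 0, 0 // overflow
}

func main() {
	v, n := uvarint([]byte{0xAC, 0x02}) // 300 encoded in two bytes
	fmt.Println(v, n)                   // 300 2
}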
diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto
index 8ddf0dc5d..569d8a916 100644
--- a/vendor/k8s.io/api/discovery/v1/generated.proto
+++ b/vendor/k8s.io/api/discovery/v1/generated.proto
@@ -31,12 +31,12 @@ option go_package = "k8s.io/api/discovery/v1";
// Endpoint represents a single logical "backend" implementing a service.
message Endpoint {
- // addresses of this endpoint. The contents of this field are interpreted
- // according to the corresponding EndpointSlice addressType field. Consumers
- // must handle different types of addresses in the context of their own
- // capabilities. This must contain at least one address but no more than
- // 100. These are all assumed to be fungible and clients may choose to only
- // use the first element. Refer to: https://issue.k8s.io/106267
+ // addresses of this endpoint. For EndpointSlices of addressType "IPv4" or "IPv6",
+ // the values are IP addresses in canonical form. The syntax and semantics of
+ // other addressType values are not defined. This must contain at least one
+ // address but no more than 100. EndpointSlices generated by the EndpointSlice
+ // controller will always have exactly 1 address. No semantics are defined for
+ // additional addresses beyond the first, and kube-proxy does not look at them.
// +listType=set
repeated string addresses = 1;
@@ -82,36 +82,42 @@ message Endpoint {
// EndpointConditions represents the current condition of an endpoint.
message EndpointConditions {
- // ready indicates that this endpoint is prepared to receive traffic,
+ // ready indicates that this endpoint is ready to receive traffic,
// according to whatever system is managing the endpoint. A nil value
- // indicates an unknown state. In most cases consumers should interpret this
- // unknown state as ready. For compatibility reasons, ready should never be
- // "true" for terminating endpoints, except when the normal readiness
- // behavior is being explicitly overridden, for example when the associated
- // Service has set the publishNotReadyAddresses flag.
+ // should be interpreted as "true". In general, an endpoint should be
+ // marked ready if it is serving and not terminating, though this can
+ // be overridden in some cases, such as when the associated Service has
+ // set the publishNotReadyAddresses flag.
// +optional
optional bool ready = 1;
- // serving is identical to ready except that it is set regardless of the
- // terminating state of endpoints. This condition should be set to true for
- // a ready endpoint that is terminating. If nil, consumers should defer to
- // the ready condition.
+ // serving indicates that this endpoint is able to receive traffic,
+ // according to whatever system is managing the endpoint. For endpoints
+ // backed by pods, the EndpointSlice controller will mark the endpoint
+ // as serving if the pod's Ready condition is True. A nil value should be
+ // interpreted as "true".
// +optional
optional bool serving = 2;
// terminating indicates that this endpoint is terminating. A nil value
- // indicates an unknown state. Consumers should interpret this unknown state
- // to mean that the endpoint is not terminating.
+ // should be interpreted as "false".
// +optional
optional bool terminating = 3;
}
// EndpointHints provides hints describing how an endpoint should be consumed.
message EndpointHints {
- // forZones indicates the zone(s) this endpoint should be consumed by to
- // enable topology aware routing.
+ // forZones indicates the zone(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
// +listType=atomic
repeated ForZone forZones = 1;
+
+ // forNodes indicates the node(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
+ // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
+ // feature gate is enabled.
+ // +listType=atomic
+ repeated ForNode forNodes = 2;
}
// EndpointPort represents a Port used by an EndpointSlice
@@ -132,8 +138,9 @@ message EndpointPort {
optional string protocol = 2;
// port represents the port number of the endpoint.
- // If this is not specified, ports are not restricted and must be
- // interpreted in the context of the specific consumer.
+ // If the EndpointSlice is derived from a Kubernetes service, this must be set
+ // to the service's target port. EndpointSlices used for other purposes may have
+ // a nil port.
optional int32 port = 3;
// The application protocol for this port.
@@ -155,9 +162,12 @@ message EndpointPort {
optional string appProtocol = 4;
}
-// EndpointSlice represents a subset of the endpoints that implement a service.
-// For a given service there may be multiple EndpointSlice objects, selected by
-// labels, which must be joined to produce the full set of endpoints.
+// EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by
+// the EndpointSlice controller to represent the Pods selected by Service objects. For a
+// given service there may be multiple EndpointSlice objects which must be joined to
+// produce the full set of endpoints; you can find all of the slices for a given service
+// by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name`
+// label contains the service's name.
message EndpointSlice {
// Standard object's metadata.
// +optional
@@ -169,7 +179,10 @@ message EndpointSlice {
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
- // * FQDN: Represents a Fully Qualified Domain Name.
+ // * FQDN: Represents a Fully Qualified Domain Name. (Deprecated)
+ // The EndpointSlice controller only generates, and kube-proxy only processes,
+ // slices of addressType "IPv4" and "IPv6". No semantics are defined for
+ // the "FQDN" type.
optional string addressType = 4;
// endpoints is a list of unique endpoints in this slice. Each slice may
@@ -178,10 +191,11 @@ message EndpointSlice {
repeated Endpoint endpoints = 2;
// ports specifies the list of network ports exposed by each endpoint in
- // this slice. Each port must have a unique name. When ports is empty, it
- // indicates that there are no defined ports. When a port is defined with a
- // nil port value, it indicates "all ports". Each slice may include a
+ // this slice. Each port must have a unique name. Each slice may include a
// maximum of 100 ports.
+ // Services always have at least 1 port, so EndpointSlices generated by the
+ // EndpointSlice controller will likewise always have at least 1 port.
+ // EndpointSlices used for other purposes may have an empty ports list.
// +optional
// +listType=atomic
repeated EndpointPort ports = 3;
@@ -197,6 +211,12 @@ message EndpointSliceList {
repeated EndpointSlice items = 2;
}
+// ForNode provides information about which nodes should consume this endpoint.
+message ForNode {
+ // name represents the name of the node.
+ optional string name = 1;
+}
+
// ForZone provides information about which zones should consume this endpoint.
message ForZone {
// name represents the name of the zone.
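The rewritten condition comments above give each field a concrete nil default: nil ready and serving read as "true", nil terminating as "false". The sketch below shows how a consumer could apply those defaults; boolOrDefault and usable are illustrative helpers, not part of this API.

package example

import discoveryv1 "k8s.io/api/discovery/v1"

// boolOrDefault resolves a *bool condition using the default that the field's
// doc comment prescribes for a nil value.
func boolOrDefault(v *bool, def bool) bool {
	if v == nil {
		return def
	}
	return *v
}

// usable reports whether an endpoint should receive new traffic under the
// documented defaults: nil ready/serving mean "true", nil terminating "false".
func usable(ep discoveryv1.Endpoint) bool {
	ready := boolOrDefault(ep.Conditions.Ready, true)              // nil => true
	terminating := boolOrDefault(ep.Conditions.Terminating, false) // nil => false
	return ready && !terminating
}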
diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go
index d6a9d0fce..6f2695316 100644
--- a/vendor/k8s.io/api/discovery/v1/types.go
+++ b/vendor/k8s.io/api/discovery/v1/types.go
@@ -25,9 +25,12 @@ import (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.21
-// EndpointSlice represents a subset of the endpoints that implement a service.
-// For a given service there may be multiple EndpointSlice objects, selected by
-// labels, which must be joined to produce the full set of endpoints.
+// EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by
+// the EndpointSlice controller to represent the Pods selected by Service objects. For a
+// given service there may be multiple EndpointSlice objects which must be joined to
+// produce the full set of endpoints; you can find all of the slices for a given service
+// by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name`
+// label contains the service's name.
type EndpointSlice struct {
metav1.TypeMeta `json:",inline"`
@@ -41,7 +44,10 @@ type EndpointSlice struct {
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
- // * FQDN: Represents a Fully Qualified Domain Name.
+ // * FQDN: Represents a Fully Qualified Domain Name. (Deprecated)
+ // The EndpointSlice controller only generates, and kube-proxy only processes,
+ // slices of addressType "IPv4" and "IPv6". No semantics are defined for
+ // the "FQDN" type.
AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
// endpoints is a list of unique endpoints in this slice. Each slice may
@@ -50,10 +56,11 @@ type EndpointSlice struct {
Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"`
// ports specifies the list of network ports exposed by each endpoint in
- // this slice. Each port must have a unique name. When ports is empty, it
- // indicates that there are no defined ports. When a port is defined with a
- // nil port value, it indicates "all ports". Each slice may include a
+ // this slice. Each port must have a unique name. Each slice may include a
// maximum of 100 ports.
+ // Services always have at least 1 port, so EndpointSlices generated by the
+ // EndpointSlice controller will likewise always have at least 1 port.
+ // EndpointSlices used for other purposes may have an empty ports list.
// +optional
// +listType=atomic
Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"`
@@ -76,12 +83,12 @@ const (
// Endpoint represents a single logical "backend" implementing a service.
type Endpoint struct {
- // addresses of this endpoint. The contents of this field are interpreted
- // according to the corresponding EndpointSlice addressType field. Consumers
- // must handle different types of addresses in the context of their own
- // capabilities. This must contain at least one address but no more than
- // 100. These are all assumed to be fungible and clients may choose to only
- // use the first element. Refer to: https://issue.k8s.io/106267
+ // addresses of this endpoint. For EndpointSlices of addressType "IPv4" or "IPv6",
+ // the values are IP addresses in canonical form. The syntax and semantics of
+ // other addressType values are not defined. This must contain at least one
+ // address but no more than 100. EndpointSlices generated by the EndpointSlice
+ // controller will always have exactly 1 address. No semantics are defined for
+ // additional addresses beyond the first, and kube-proxy does not look at them.
// +listType=set
Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
@@ -127,36 +134,42 @@ type Endpoint struct {
// EndpointConditions represents the current condition of an endpoint.
type EndpointConditions struct {
- // ready indicates that this endpoint is prepared to receive traffic,
+ // ready indicates that this endpoint is ready to receive traffic,
// according to whatever system is managing the endpoint. A nil value
- // indicates an unknown state. In most cases consumers should interpret this
- // unknown state as ready. For compatibility reasons, ready should never be
- // "true" for terminating endpoints, except when the normal readiness
- // behavior is being explicitly overridden, for example when the associated
- // Service has set the publishNotReadyAddresses flag.
+ // should be interpreted as "true". In general, an endpoint should be
+ // marked ready if it is serving and not terminating, though this can
+ // be overridden in some cases, such as when the associated Service has
+ // set the publishNotReadyAddresses flag.
// +optional
Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"`
- // serving is identical to ready except that it is set regardless of the
- // terminating state of endpoints. This condition should be set to true for
- // a ready endpoint that is terminating. If nil, consumers should defer to
- // the ready condition.
+ // serving indicates that this endpoint is able to receive traffic,
+ // according to whatever system is managing the endpoint. For endpoints
+ // backed by pods, the EndpointSlice controller will mark the endpoint
+ // as serving if the pod's Ready condition is True. A nil value should be
+ // interpreted as "true".
// +optional
Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"`
// terminating indicates that this endpoint is terminating. A nil value
- // indicates an unknown state. Consumers should interpret this unknown state
- // to mean that the endpoint is not terminating.
+ // should be interpreted as "false".
// +optional
Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"`
}
// EndpointHints provides hints describing how an endpoint should be consumed.
type EndpointHints struct {
- // forZones indicates the zone(s) this endpoint should be consumed by to
- // enable topology aware routing.
+ // forZones indicates the zone(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
// +listType=atomic
ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"`
+
+ // forNodes indicates the node(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
+ // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
+ // feature gate is enabled.
+ // +listType=atomic
+ ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"`
}
// ForZone provides information about which zones should consume this endpoint.
@@ -165,6 +178,12 @@ type ForZone struct {
Name string `json:"name" protobuf:"bytes,1,name=name"`
}
+// ForNode provides information about which nodes should consume this endpoint.
+type ForNode struct {
+ // name represents the name of the node.
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+}
+
// EndpointPort represents a Port used by an EndpointSlice
// +structType=atomic
type EndpointPort struct {
@@ -183,8 +202,9 @@ type EndpointPort struct {
Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"`
// port represents the port number of the endpoint.
- // If this is not specified, ports are not restricted and must be
- // interpreted in the context of the specific consumer.
+ // If the EndpointSlice is derived from a Kubernetes service, this must be set
+ // to the service's target port. EndpointSlices used for other purposes may have
+ // a nil port.
Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
// The application protocol for this port.
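Pulling the revised field semantics together, a slice populated the way the comments above describe might look like the following sketch. The names my-svc, node-a, and us-east-1a are invented, and forNodes is only honored when the Alpha PreferSameTrafficDistribution feature gate is enabled.

package example

import (
	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleSlice builds an IPv4 EndpointSlice with one endpoint carrying both
// zone and node hints. All values are illustrative only.
func exampleSlice() *discoveryv1.EndpointSlice {
	targetPort := int32(8080) // the backing service's target port, per the port comment above
	return &discoveryv1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-svc-abc12",
			Namespace: "default",
			// Controller-managed slices carry this label (see the
			// EndpointSlice doc comment above).
			Labels: map[string]string{discoveryv1.LabelServiceName: "my-svc"},
		},
		AddressType: discoveryv1.AddressTypeIPv4,
		Endpoints: []discoveryv1.Endpoint{{
			Addresses: []string{"10.0.0.1"}, // controller-generated slices have exactly 1 address
			Hints: &discoveryv1.EndpointHints{
				ForZones: []discoveryv1.ForZone{{Name: "us-east-1a"}},
				ForNodes: []discoveryv1.ForNode{{Name: "node-a"}}, // Alpha, feature-gated
			},
		}},
		Ports: []discoveryv1.EndpointPort{{Port: &targetPort}},
	}
}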
diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
index 41c306056..ac5b853b9 100644
--- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_Endpoint = map[string]string{
"": "Endpoint represents a single logical \"backend\" implementing a service.",
- "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267",
+ "addresses": "addresses of this endpoint. For EndpointSlices of addressType \"IPv4\" or \"IPv6\", the values are IP addresses in canonical form. The syntax and semantics of other addressType values are not defined. This must contain at least one address but no more than 100. EndpointSlices generated by the EndpointSlice controller will always have exactly 1 address. No semantics are defined for additional addresses beyond the first, and kube-proxy does not look at them.",
"conditions": "conditions contains information about the current status of the endpoint.",
"hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.",
"targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.",
@@ -45,9 +45,9 @@ func (Endpoint) SwaggerDoc() map[string]string {
var map_EndpointConditions = map[string]string{
"": "EndpointConditions represents the current condition of an endpoint.",
- "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.",
- "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.",
- "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.",
+ "ready": "ready indicates that this endpoint is ready to receive traffic, according to whatever system is managing the endpoint. A nil value should be interpreted as \"true\". In general, an endpoint should be marked ready if it is serving and not terminating, though this can be overridden in some cases, such as when the associated Service has set the publishNotReadyAddresses flag.",
+ "serving": "serving indicates that this endpoint is able to receive traffic, according to whatever system is managing the endpoint. For endpoints backed by pods, the EndpointSlice controller will mark the endpoint as serving if the pod's Ready condition is True. A nil value should be interpreted as \"true\".",
+ "terminating": "terminating indicates that this endpoint is terminating. A nil value should be interpreted as \"false\".",
}
func (EndpointConditions) SwaggerDoc() map[string]string {
@@ -56,7 +56,8 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
var map_EndpointHints = map[string]string{
"": "EndpointHints provides hints describing how an endpoint should be consumed.",
- "forZones": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.",
+ "forZones": "forZones indicates the zone(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries.",
+ "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.",
}
func (EndpointHints) SwaggerDoc() map[string]string {
@@ -67,7 +68,7 @@ var map_EndpointPort = map[string]string{
"": "EndpointPort represents a Port used by an EndpointSlice",
"name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
"protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
- "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
+ "port": "port represents the port number of the endpoint. If the EndpointSlice is derived from a Kubernetes service, this must be set to the service's target port. EndpointSlices used for other purposes may have a nil port.",
"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
}
@@ -76,11 +77,11 @@ func (EndpointPort) SwaggerDoc() map[string]string {
}
var map_EndpointSlice = map[string]string{
- "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
+ "": "EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by the EndpointSlice controller to represent the Pods selected by Service objects. For a given service there may be multiple EndpointSlice objects which must be joined to produce the full set of endpoints; you can find all of the slices for a given service by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name` label contains the service's name.",
"metadata": "Standard object's metadata.",
- "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
+ "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. (Deprecated) The EndpointSlice controller only generates, and kube-proxy only processes, slices of addressType \"IPv4\" and \"IPv6\". No semantics are defined for the \"FQDN\" type.",
"endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
- "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
+ "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. Each slice may include a maximum of 100 ports. Services always have at least 1 port, so EndpointSlices generated by the EndpointSlice controller will likewise always have at least 1 port. EndpointSlices used for other purposes may have an empty ports list.",
}
func (EndpointSlice) SwaggerDoc() map[string]string {
@@ -97,6 +98,15 @@ func (EndpointSliceList) SwaggerDoc() map[string]string {
return map_EndpointSliceList
}
+var map_ForNode = map[string]string{
+ "": "ForNode provides information about which nodes should consume this endpoint.",
+ "name": "name represents the name of the node.",
+}
+
+func (ForNode) SwaggerDoc() map[string]string {
+ return map_ForNode
+}
+
var map_ForZone = map[string]string{
"": "ForZone provides information about which zones should consume this endpoint.",
"name": "name represents the name of the zone.",
diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go
index caa872af0..60eada3b9 100644
--- a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go
@@ -119,6 +119,11 @@ func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
*out = make([]ForZone, len(*in))
copy(*out, *in)
}
+ if in.ForNodes != nil {
+ in, out := &in.ForNodes, &out.ForNodes
+ *out = make([]ForNode, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -241,6 +246,22 @@ func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForNode) DeepCopyInto(out *ForNode) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForNode.
+func (in *ForNode) DeepCopy() *ForNode {
+ if in == nil {
+ return nil
+ }
+ out := new(ForNode)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ForZone) DeepCopyInto(out *ForZone) {
*out = *in
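The generated DeepCopyInto above gains a ForNodes branch because Go slices alias their backing arrays unless copied explicitly. A small sketch of the guarantee the added code provides (values illustrative):

package example

import discoveryv1 "k8s.io/api/discovery/v1"

// mutateCopy demonstrates that a DeepCopy of EndpointHints owns its own
// ForNodes backing array: writing through the copy leaves the original intact.
func mutateCopy() (orig, copied string) {
	h := &discoveryv1.EndpointHints{
		ForNodes: []discoveryv1.ForNode{{Name: "node-a"}},
	}
	c := h.DeepCopy()             // uses the generated code added above
	c.ForNodes[0].Name = "node-b" // does not alias h.ForNodes
	return h.ForNodes[0].Name, c.ForNodes[0].Name // "node-a", "node-b"
}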
diff --git a/vendor/k8s.io/api/discovery/v1beta1/doc.go b/vendor/k8s.io/api/discovery/v1beta1/doc.go
index 7d7084802..f12087eff 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/doc.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=discovery.k8s.io
-package v1beta1 // import "k8s.io/api/discovery/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
index 46935574b..de3257786 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
@@ -214,10 +214,38 @@ func (m *EndpointSliceList) XXX_DiscardUnknown() {
var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
+func (m *ForNode) Reset() { *m = ForNode{} }
+func (*ForNode) ProtoMessage() {}
+func (*ForNode) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6555bad15de200e0, []int{6}
+}
+func (m *ForNode) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ForNode) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ForNode.Merge(m, src)
+}
+func (m *ForNode) XXX_Size() int {
+ return m.Size()
+}
+func (m *ForNode) XXX_DiscardUnknown() {
+ xxx_messageInfo_ForNode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ForNode proto.InternalMessageInfo
+
func (m *ForZone) Reset() { *m = ForZone{} }
func (*ForZone) ProtoMessage() {}
func (*ForZone) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{6}
+ return fileDescriptor_6555bad15de200e0, []int{7}
}
func (m *ForZone) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -250,6 +278,7 @@ func init() {
proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1beta1.EndpointPort")
proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1beta1.EndpointSlice")
proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1beta1.EndpointSliceList")
+ proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1beta1.ForNode")
proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1beta1.ForZone")
}
@@ -258,61 +287,62 @@ func init() {
}
var fileDescriptor_6555bad15de200e0 = []byte{
- // 857 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xe4, 0x34,
- 0x14, 0x9f, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d,
+ // 877 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4f, 0x6f, 0xe4, 0x34,
+ 0x1c, 0x9d, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d,
0x1a, 0x51, 0x48, 0x68, 0xb5, 0x42, 0x2b, 0x38, 0x35, 0xb0, 0xb0, 0x48, 0xcb, 0x6e, 0xe5, 0x56,
0x42, 0x5a, 0x71, 0xc0, 0x93, 0xb8, 0x19, 0xd3, 0x26, 0x8e, 0x62, 0x77, 0xa4, 0xb9, 0xf1, 0x0d,
- 0xe0, 0xb3, 0xf0, 0x15, 0x90, 0x50, 0x8f, 0x7b, 0xdc, 0x53, 0xc4, 0x84, 0x6f, 0xb1, 0x27, 0x64,
- 0xc7, 0xf9, 0x33, 0x0c, 0x94, 0xb9, 0xc5, 0x3f, 0xbf, 0xdf, 0xef, 0xbd, 0xf7, 0x7b, 0xb6, 0x03,
- 0x3e, 0xbe, 0x7e, 0xc2, 0x7d, 0xca, 0x02, 0x9c, 0xd3, 0x20, 0xa6, 0x3c, 0x62, 0x0b, 0x52, 0x2c,
- 0x83, 0xc5, 0xc9, 0x8c, 0x08, 0x7c, 0x12, 0x24, 0x24, 0x23, 0x05, 0x16, 0x24, 0xf6, 0xf3, 0x82,
- 0x09, 0x06, 0x8f, 0xea, 0x68, 0x1f, 0xe7, 0xd4, 0x6f, 0xa3, 0x7d, 0x1d, 0x7d, 0xf8, 0x49, 0x42,
- 0xc5, 0xfc, 0x76, 0xe6, 0x47, 0x2c, 0x0d, 0x12, 0x96, 0xb0, 0x40, 0x91, 0x66, 0xb7, 0x57, 0x6a,
- 0xa5, 0x16, 0xea, 0xab, 0x16, 0x3b, 0xf4, 0x7a, 0xa9, 0x23, 0x56, 0x90, 0x60, 0xb1, 0x91, 0xf0,
- 0xf0, 0x71, 0x17, 0x93, 0xe2, 0x68, 0x4e, 0x33, 0x59, 0x5d, 0x7e, 0x9d, 0x48, 0x80, 0x07, 0x29,
- 0x11, 0xf8, 0xdf, 0x58, 0xc1, 0x7f, 0xb1, 0x8a, 0xdb, 0x4c, 0xd0, 0x94, 0x6c, 0x10, 0x3e, 0xfb,
- 0x3f, 0x02, 0x8f, 0xe6, 0x24, 0xc5, 0xff, 0xe4, 0x79, 0xbf, 0xed, 0x02, 0xeb, 0x69, 0x16, 0xe7,
- 0x8c, 0x66, 0x02, 0x1e, 0x03, 0x1b, 0xc7, 0x71, 0x41, 0x38, 0x27, 0x7c, 0x6c, 0x4c, 0x86, 0x53,
- 0x3b, 0x3c, 0xa8, 0x4a, 0xd7, 0x3e, 0x6b, 0x40, 0xd4, 0xed, 0xc3, 0x18, 0x80, 0x88, 0x65, 0x31,
- 0x15, 0x94, 0x65, 0x7c, 0xbc, 0x33, 0x31, 0xa6, 0xa3, 0xd3, 0x4f, 0xfd, 0xfb, 0xec, 0xf5, 0x9b,
- 0x44, 0x5f, 0xb6, 0xbc, 0x10, 0xde, 0x95, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xa7, 0x0b,
- 0xa7, 0xc0, 0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xbf, 0x2a,
- 0x5d, 0xeb, 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0xcf, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xd5,
- 0x78, 0x57, 0x95, 0xf3, 0x41, 0xbf, 0x1c, 0x39, 0x20, 0x7f, 0x71, 0xe2, 0xbf, 0x9c, 0xfd, 0x44,
- 0x22, 0x19, 0x44, 0x0a, 0x92, 0x45, 0xa4, 0xee, 0xf0, 0xb2, 0x61, 0xa2, 0x4e, 0x04, 0xce, 0x80,
- 0x25, 0x58, 0xce, 0x6e, 0x58, 0xb2, 0x1c, 0x9b, 0x93, 0xe1, 0x74, 0x74, 0xfa, 0x78, 0xbb, 0xfe,
- 0xfc, 0x4b, 0x4d, 0x7b, 0x9a, 0x89, 0x62, 0x19, 0x3e, 0xd0, 0x3d, 0x5a, 0x0d, 0x8c, 0x5a, 0x5d,
- 0xd9, 0x5f, 0xc6, 0x62, 0xf2, 0x42, 0xf6, 0xf7, 0x4e, 0xd7, 0xdf, 0x0b, 0x8d, 0xa1, 0x76, 0x17,
- 0x3e, 0x07, 0xe6, 0x9c, 0x66, 0x82, 0x8f, 0xf7, 0x54, 0x6f, 0xc7, 0xdb, 0x95, 0xf2, 0x4c, 0x52,
- 0x42, 0xbb, 0x2a, 0x5d, 0x53, 0x7d, 0xa2, 0x5a, 0xe4, 0xf0, 0x0b, 0x70, 0xb0, 0x56, 0x24, 0x7c,
- 0x00, 0x86, 0xd7, 0x64, 0x39, 0x36, 0x64, 0x0d, 0x48, 0x7e, 0xc2, 0xf7, 0x80, 0xb9, 0xc0, 0x37,
- 0xb7, 0x44, 0xcd, 0xd6, 0x46, 0xf5, 0xe2, 0xf3, 0x9d, 0x27, 0x86, 0xf7, 0x8b, 0x01, 0xe0, 0xe6,
- 0x2c, 0xa1, 0x0b, 0xcc, 0x82, 0xe0, 0xb8, 0x16, 0xb1, 0xea, 0xa4, 0x48, 0x02, 0xa8, 0xc6, 0xe1,
- 0x23, 0xb0, 0xc7, 0x49, 0xb1, 0xa0, 0x59, 0xa2, 0x34, 0xad, 0x70, 0x54, 0x95, 0xee, 0xde, 0x45,
- 0x0d, 0xa1, 0x66, 0x0f, 0x9e, 0x80, 0x91, 0x20, 0x45, 0x4a, 0x33, 0x2c, 0x64, 0xe8, 0x50, 0x85,
- 0xbe, 0x5b, 0x95, 0xee, 0xe8, 0xb2, 0x83, 0x51, 0x3f, 0xc6, 0x8b, 0xc1, 0xc1, 0x5a, 0xc7, 0xf0,
- 0x02, 0x58, 0x57, 0xac, 0x78, 0xc5, 0x32, 0x7d, 0x92, 0x47, 0xa7, 0x8f, 0xee, 0x37, 0xec, 0xeb,
- 0x3a, 0xba, 0x1b, 0x96, 0x06, 0x38, 0x6a, 0x85, 0xbc, 0x3f, 0x0c, 0xb0, 0xdf, 0xa4, 0x39, 0x67,
- 0x85, 0x80, 0x47, 0x60, 0x57, 0x9d, 0x4c, 0xe5, 0x5a, 0x68, 0x55, 0xa5, 0xbb, 0xab, 0xa6, 0xa6,
- 0x50, 0xf8, 0x0d, 0xb0, 0xd4, 0x25, 0x8b, 0xd8, 0x4d, 0xed, 0x61, 0x78, 0x2c, 0x85, 0xcf, 0x35,
- 0xf6, 0xb6, 0x74, 0xdf, 0xdf, 0x7c, 0x40, 0xfc, 0x66, 0x1b, 0xb5, 0x64, 0x99, 0x26, 0x67, 0x85,
- 0x50, 0x4e, 0x98, 0x75, 0x1a, 0x99, 0x1e, 0x29, 0x54, 0xda, 0x85, 0xf3, 0xbc, 0xa1, 0xa9, 0xa3,
- 0x6f, 0xd7, 0x76, 0x9d, 0x75, 0x30, 0xea, 0xc7, 0x78, 0xab, 0x9d, 0xce, 0xaf, 0x8b, 0x1b, 0x1a,
- 0x11, 0xf8, 0x23, 0xb0, 0xe4, 0x5b, 0x14, 0x63, 0x81, 0x55, 0x37, 0xeb, 0x77, 0xb9, 0x7d, 0x52,
- 0xfc, 0xfc, 0x3a, 0x91, 0x00, 0xf7, 0x65, 0x74, 0x77, 0x9d, 0xbe, 0x23, 0x02, 0x77, 0x77, 0xb9,
- 0xc3, 0x50, 0xab, 0x0a, 0xbf, 0x02, 0x23, 0xfd, 0x78, 0x5c, 0x2e, 0x73, 0xa2, 0xcb, 0xf4, 0x34,
- 0x65, 0x74, 0xd6, 0x6d, 0xbd, 0x5d, 0x5f, 0xa2, 0x3e, 0x0d, 0x7e, 0x0f, 0x6c, 0xa2, 0x0b, 0x97,
- 0x8f, 0x8e, 0x1c, 0xec, 0x87, 0xdb, 0xdd, 0x84, 0xf0, 0xa1, 0xce, 0x65, 0x37, 0x08, 0x47, 0x9d,
- 0x16, 0x7c, 0x09, 0x4c, 0xe9, 0x26, 0x1f, 0x0f, 0x95, 0xe8, 0x47, 0xdb, 0x89, 0xca, 0x31, 0x84,
- 0x07, 0x5a, 0xd8, 0x94, 0x2b, 0x8e, 0x6a, 0x1d, 0xef, 0x77, 0x03, 0x3c, 0x5c, 0xf3, 0xf8, 0x39,
- 0xe5, 0x02, 0xfe, 0xb0, 0xe1, 0xb3, 0xbf, 0x9d, 0xcf, 0x92, 0xad, 0x5c, 0x6e, 0x0f, 0x68, 0x83,
- 0xf4, 0x3c, 0x3e, 0x07, 0x26, 0x15, 0x24, 0x6d, 0x9c, 0xd9, 0xf2, 0x8d, 0x50, 0xd5, 0x75, 0x5d,
- 0x7c, 0x2b, 0x15, 0x50, 0x2d, 0xe4, 0x1d, 0x83, 0x3d, 0x7d, 0x11, 0xe0, 0x64, 0xed, 0xb0, 0xef,
- 0xeb, 0xf0, 0xde, 0x81, 0x0f, 0xc3, 0xbb, 0x95, 0x33, 0x78, 0xbd, 0x72, 0x06, 0x6f, 0x56, 0xce,
- 0xe0, 0xe7, 0xca, 0x31, 0xee, 0x2a, 0xc7, 0x78, 0x5d, 0x39, 0xc6, 0x9b, 0xca, 0x31, 0xfe, 0xac,
- 0x1c, 0xe3, 0xd7, 0xbf, 0x9c, 0xc1, 0xab, 0xa3, 0xfb, 0x7e, 0xd8, 0x7f, 0x07, 0x00, 0x00, 0xff,
- 0xff, 0x1c, 0xe6, 0x20, 0x06, 0xcf, 0x07, 0x00, 0x00,
+ 0xe0, 0xb3, 0x70, 0xe3, 0x8c, 0x84, 0x7a, 0xdc, 0xe3, 0x9e, 0x22, 0x1a, 0xbe, 0xc5, 0x9e, 0x90,
+ 0x1d, 0xe7, 0xcf, 0x30, 0xd0, 0xce, 0x2d, 0x7e, 0x7e, 0xef, 0xfd, 0xfe, 0xd9, 0x56, 0xc0, 0xc7,
+ 0x97, 0x4f, 0xb8, 0x4f, 0x59, 0x80, 0x73, 0x1a, 0xc4, 0x94, 0x47, 0x6c, 0x41, 0x8a, 0x65, 0xb0,
+ 0x38, 0x9a, 0x11, 0x81, 0x8f, 0x82, 0x84, 0x64, 0xa4, 0xc0, 0x82, 0xc4, 0x7e, 0x5e, 0x30, 0xc1,
+ 0xe0, 0x41, 0xcd, 0xf6, 0x71, 0x4e, 0xfd, 0x96, 0xed, 0x6b, 0xf6, 0xfe, 0x27, 0x09, 0x15, 0xf3,
+ 0xeb, 0x99, 0x1f, 0xb1, 0x34, 0x48, 0x58, 0xc2, 0x02, 0x25, 0x9a, 0x5d, 0x5f, 0xa8, 0x95, 0x5a,
+ 0xa8, 0xaf, 0xda, 0x6c, 0xdf, 0xeb, 0x85, 0x8e, 0x58, 0x41, 0x82, 0xc5, 0x5a, 0xc0, 0xfd, 0xc7,
+ 0x1d, 0x27, 0xc5, 0xd1, 0x9c, 0x66, 0x32, 0xbb, 0xfc, 0x32, 0x91, 0x00, 0x0f, 0x52, 0x22, 0xf0,
+ 0x7f, 0xa9, 0x82, 0xff, 0x53, 0x15, 0xd7, 0x99, 0xa0, 0x29, 0x59, 0x13, 0x7c, 0x76, 0x9f, 0x80,
+ 0x47, 0x73, 0x92, 0xe2, 0x7f, 0xeb, 0xbc, 0xdf, 0xb6, 0x81, 0xf5, 0x34, 0x8b, 0x73, 0x46, 0x33,
+ 0x01, 0x0f, 0x81, 0x8d, 0xe3, 0xb8, 0x20, 0x9c, 0x13, 0x3e, 0x36, 0x26, 0xc3, 0xa9, 0x1d, 0xee,
+ 0x55, 0xa5, 0x6b, 0x9f, 0x34, 0x20, 0xea, 0xf6, 0x61, 0x0c, 0x40, 0xc4, 0xb2, 0x98, 0x0a, 0xca,
+ 0x32, 0x3e, 0xde, 0x9a, 0x18, 0xd3, 0xd1, 0xf1, 0xa7, 0xfe, 0x5d, 0xed, 0xf5, 0x9b, 0x40, 0x5f,
+ 0xb6, 0xba, 0x10, 0xde, 0x94, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xe7, 0x0b, 0xa7, 0xc0,
+ 0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xb7, 0x2a, 0x5d, 0xeb,
+ 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0x4f, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xc5, 0x78, 0x5b,
+ 0xa5, 0xf3, 0x41, 0x3f, 0x1d, 0x39, 0x20, 0x7f, 0x71, 0xe4, 0xbf, 0x9c, 0xfd, 0x44, 0x22, 0x49,
+ 0x22, 0x05, 0xc9, 0x22, 0x52, 0x57, 0x78, 0xde, 0x28, 0x51, 0x67, 0x02, 0x67, 0xc0, 0x12, 0x2c,
+ 0x67, 0x57, 0x2c, 0x59, 0x8e, 0xcd, 0xc9, 0x70, 0x3a, 0x3a, 0x7e, 0xbc, 0x59, 0x7d, 0xfe, 0xb9,
+ 0x96, 0x3d, 0xcd, 0x44, 0xb1, 0x0c, 0x1f, 0xe8, 0x1a, 0xad, 0x06, 0x46, 0xad, 0xaf, 0xac, 0x2f,
+ 0x63, 0x31, 0x79, 0x21, 0xeb, 0x7b, 0xa7, 0xab, 0xef, 0x85, 0xc6, 0x50, 0xbb, 0x0b, 0x9f, 0x03,
+ 0x73, 0x4e, 0x33, 0xc1, 0xc7, 0x3b, 0xaa, 0xb6, 0xc3, 0xcd, 0x52, 0x79, 0x26, 0x25, 0xa1, 0x5d,
+ 0x95, 0xae, 0xa9, 0x3e, 0x51, 0x6d, 0xb2, 0xff, 0x05, 0xd8, 0x5b, 0x49, 0x12, 0x3e, 0x00, 0xc3,
+ 0x4b, 0xb2, 0x1c, 0x1b, 0x32, 0x07, 0x24, 0x3f, 0xe1, 0x7b, 0xc0, 0x5c, 0xe0, 0xab, 0x6b, 0xa2,
+ 0x66, 0x6b, 0xa3, 0x7a, 0xf1, 0xf9, 0xd6, 0x13, 0xc3, 0xfb, 0xc5, 0x00, 0x70, 0x7d, 0x96, 0xd0,
+ 0x05, 0x66, 0x41, 0x70, 0x5c, 0x9b, 0x58, 0x75, 0x50, 0x24, 0x01, 0x54, 0xe3, 0xf0, 0x11, 0xd8,
+ 0xe1, 0xa4, 0x58, 0xd0, 0x2c, 0x51, 0x9e, 0x56, 0x38, 0xaa, 0x4a, 0x77, 0xe7, 0xac, 0x86, 0x50,
+ 0xb3, 0x07, 0x8f, 0xc0, 0x48, 0x90, 0x22, 0xa5, 0x19, 0x16, 0x92, 0x3a, 0x54, 0xd4, 0x77, 0xab,
+ 0xd2, 0x1d, 0x9d, 0x77, 0x30, 0xea, 0x73, 0xbc, 0xdf, 0x0d, 0xb0, 0xb7, 0x52, 0x32, 0x3c, 0x03,
+ 0xd6, 0x05, 0x2b, 0x5e, 0xb1, 0x4c, 0x1f, 0xe5, 0xd1, 0xf1, 0xa3, 0xbb, 0x3b, 0xf6, 0x75, 0xcd,
+ 0xee, 0xa6, 0xa5, 0x01, 0x8e, 0x5a, 0x23, 0x6d, 0x2a, 0x87, 0x23, 0x4f, 0xfc, 0x66, 0xa6, 0x92,
+ 0xbd, 0x62, 0xaa, 0xe4, 0xa8, 0x35, 0xf2, 0xfe, 0x34, 0xc0, 0x6e, 0x93, 0xfb, 0x29, 0x2b, 0x04,
+ 0x3c, 0x00, 0xdb, 0xea, 0xbc, 0xab, 0x59, 0x84, 0x56, 0x55, 0xba, 0xdb, 0xea, 0x2c, 0x28, 0x14,
+ 0x7e, 0x03, 0x2c, 0x75, 0x75, 0x23, 0x76, 0x55, 0x4f, 0x26, 0x3c, 0x94, 0xc6, 0xa7, 0x1a, 0x7b,
+ 0x5b, 0xba, 0xef, 0xaf, 0x3f, 0x4b, 0x7e, 0xb3, 0x8d, 0x5a, 0xb1, 0x0c, 0x93, 0xb3, 0x42, 0xa8,
+ 0xfe, 0x9a, 0x75, 0x18, 0x19, 0x1e, 0x29, 0x54, 0x0e, 0x01, 0xe7, 0x79, 0x23, 0x53, 0x17, 0xca,
+ 0xae, 0x87, 0x70, 0xd2, 0xc1, 0xa8, 0xcf, 0xf1, 0x6e, 0xb7, 0xba, 0x21, 0x9c, 0x5d, 0xd1, 0x88,
+ 0xc0, 0x1f, 0x81, 0x25, 0x5f, 0xb8, 0x18, 0x0b, 0xac, 0xaa, 0x59, 0x7d, 0x21, 0xda, 0x87, 0xca,
+ 0xcf, 0x2f, 0x13, 0x09, 0x70, 0x5f, 0xb2, 0xbb, 0x4b, 0xfa, 0x1d, 0x11, 0xb8, 0x7b, 0x21, 0x3a,
+ 0x0c, 0xb5, 0xae, 0xf0, 0x2b, 0x30, 0xd2, 0x4f, 0xd2, 0xf9, 0x32, 0x27, 0x3a, 0x4d, 0x4f, 0x4b,
+ 0x46, 0x27, 0xdd, 0xd6, 0xdb, 0xd5, 0x25, 0xea, 0xcb, 0xe0, 0xf7, 0xc0, 0x26, 0x3a, 0xf1, 0x66,
+ 0xb0, 0x1f, 0x6e, 0x76, 0xbf, 0xc2, 0x87, 0x3a, 0x96, 0xdd, 0x20, 0x1c, 0x75, 0x5e, 0xf0, 0x25,
+ 0x30, 0x65, 0x37, 0xf9, 0x78, 0xa8, 0x4c, 0x3f, 0xda, 0xcc, 0x54, 0x8e, 0x21, 0xdc, 0xd3, 0xc6,
+ 0xa6, 0x5c, 0x71, 0x54, 0xfb, 0x78, 0x7f, 0x18, 0xe0, 0xe1, 0x4a, 0x8f, 0x9f, 0x53, 0x2e, 0xe0,
+ 0x0f, 0x6b, 0x7d, 0xf6, 0x37, 0xeb, 0xb3, 0x54, 0xab, 0x2e, 0xb7, 0x07, 0xb4, 0x41, 0x7a, 0x3d,
+ 0x3e, 0x05, 0x26, 0x15, 0x24, 0x6d, 0x3a, 0xb3, 0xe1, 0xcb, 0xa3, 0xb2, 0xeb, 0xaa, 0xf8, 0x56,
+ 0x3a, 0xa0, 0xda, 0xc8, 0x3b, 0x04, 0x3b, 0xfa, 0x22, 0xc0, 0xc9, 0xca, 0x61, 0xdf, 0xd5, 0xf4,
+ 0xde, 0x81, 0xd7, 0x64, 0x79, 0x01, 0xef, 0x27, 0x87, 0xe1, 0xcd, 0xad, 0x33, 0x78, 0x7d, 0xeb,
+ 0x0c, 0xde, 0xdc, 0x3a, 0x83, 0x9f, 0x2b, 0xc7, 0xb8, 0xa9, 0x1c, 0xe3, 0x75, 0xe5, 0x18, 0x6f,
+ 0x2a, 0xc7, 0xf8, 0xab, 0x72, 0x8c, 0x5f, 0xff, 0x76, 0x06, 0xaf, 0x0e, 0xee, 0xfa, 0x67, 0xf8,
+ 0x27, 0x00, 0x00, 0xff, 0xff, 0x76, 0x8e, 0x48, 0x7e, 0x52, 0x08, 0x00, 0x00,
}
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
@@ -492,6 +522,20 @@ func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if len(m.ForNodes) > 0 {
+ for iNdEx := len(m.ForNodes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ForNodes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
if len(m.ForZones) > 0 {
for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -671,6 +715,34 @@ func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *ForNode) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ForNode) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ForNode) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *ForZone) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -781,6 +853,12 @@ func (m *EndpointHints) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if len(m.ForNodes) > 0 {
+ for _, e := range m.ForNodes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -850,6 +928,17 @@ func (m *EndpointSliceList) Size() (n int) {
return n
}
+func (m *ForNode) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *ForZone) Size() (n int) {
if m == nil {
return 0
@@ -914,8 +1003,14 @@ func (this *EndpointHints) String() string {
repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + ","
}
repeatedStringForForZones += "}"
+ repeatedStringForForNodes := "[]ForNode{"
+ for _, f := range this.ForNodes {
+ repeatedStringForForNodes += strings.Replace(strings.Replace(f.String(), "ForNode", "ForNode", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForForNodes += "}"
s := strings.Join([]string{`&EndpointHints{`,
`ForZones:` + repeatedStringForForZones + `,`,
+ `ForNodes:` + repeatedStringForForNodes + `,`,
`}`,
}, "")
return s
@@ -972,6 +1067,16 @@ func (this *EndpointSliceList) String() string {
}, "")
return s
}
+func (this *ForNode) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ForNode{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *ForZone) String() string {
if this == nil {
return "nil"
@@ -1546,6 +1651,40 @@ func (m *EndpointHints) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ForNodes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ForNodes = append(m.ForNodes, ForNode{})
+ if err := m.ForNodes[len(m.ForNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -2036,6 +2175,88 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *ForNode) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ForNode: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ForNode: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ForZone) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
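The hand-written marshal/unmarshal code above follows the usual gogo/protobuf pattern: ForNodes is field 2 with wire type 2 (length-delimited), so its tag byte is (2<<3)|2 = 0x12, just as 0xa ((1<<3)|2) tags ForNode's name field. A round-trip sketch through the generated methods:

package example

import (
	"fmt"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
)

// roundTrip marshals EndpointHints with the new forNodes field and decodes it
// again; the ForNodes entries are framed with tag byte 0x12 as written above.
func roundTrip() error {
	in := &discoveryv1beta1.EndpointHints{
		ForZones: []discoveryv1beta1.ForZone{{Name: "zone-a"}},
		ForNodes: []discoveryv1beta1.ForNode{{Name: "node-a"}},
	}
	raw, err := in.Marshal()
	if err != nil {
		return err
	}
	out := &discoveryv1beta1.EndpointHints{}
	if err := out.Unmarshal(raw); err != nil {
		return err
	}
	fmt.Printf("%d zones, %d nodes\n", len(out.ForZones), len(out.ForNodes))
	return nil
}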
diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
index 55828dd97..907050da1 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
@@ -114,6 +114,13 @@ message EndpointHints {
// enable topology aware routing. May contain a maximum of 8 entries.
// +listType=atomic
repeated ForZone forZones = 1;
+
+ // forNodes indicates the node(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
+ // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
+ // feature gate is enabled.
+ // +listType=atomic
+ repeated ForNode forNodes = 2;
}
// EndpointPort represents a Port used by an EndpointSlice
@@ -189,6 +196,12 @@ message EndpointSliceList {
repeated EndpointSlice items = 2;
}
+// ForNode provides information about which nodes should consume this endpoint.
+message ForNode {
+ // name represents the name of the node.
+ optional string name = 1;
+}
+
// ForZone provides information about which zones should consume this endpoint.
message ForZone {
// name represents the name of the zone.
diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go
index defd8e2ce..fa9d1eae4 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/types.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/types.go
@@ -161,6 +161,13 @@ type EndpointHints struct {
// enable topology aware routing. May contain a maximum of 8 entries.
// +listType=atomic
ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"`
+
+ // forNodes indicates the node(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
+ // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
+ // feature gate is enabled.
+ // +listType=atomic
+ ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"`
}
// ForZone provides information about which zones should consume this endpoint.
@@ -169,6 +176,12 @@ type ForZone struct {
Name string `json:"name" protobuf:"bytes,1,name=name"`
}
+// ForNode provides information about which nodes should consume this endpoint.
+type ForNode struct {
+ // name represents the name of the node.
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+}
+
// EndpointPort represents a Port used by an EndpointSlice
type EndpointPort struct {
// name represents the name of this port. All ports in an EndpointSlice must have a unique name.
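On the consuming side, nothing in this diff defines how forNodes interacts with forZones; purely as an illustration, a proxy might prefer node hints and fall back to zone hints, as in this hypothetical pickEndpoints sketch:

package example

import discoveryv1beta1 "k8s.io/api/discovery/v1beta1"

// pickEndpoints keeps endpoints hinted at the local node when node hints are
// present, otherwise those hinted at the local zone; endpoints without hints
// are always eligible. This policy is illustrative, not taken from this diff.
func pickEndpoints(eps []discoveryv1beta1.Endpoint, node, zone string) []discoveryv1beta1.Endpoint {
	var out []discoveryv1beta1.Endpoint
	for _, ep := range eps {
		h := ep.Hints
		switch {
		case h == nil || (len(h.ForNodes) == 0 && len(h.ForZones) == 0):
			out = append(out, ep) // no hints: every endpoint is eligible
		case len(h.ForNodes) > 0: // node hints take precedence in this sketch
			for _, n := range h.ForNodes {
				if n.Name == node {
					out = append(out, ep)
					break
				}
			}
		default:
			for _, z := range h.ForZones {
				if z.Name == zone {
					out = append(out, ep)
					break
				}
			}
		}
	}
	return out
}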
diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
index 847d4d58e..72aa0cb9b 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
@@ -56,6 +56,7 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
var map_EndpointHints = map[string]string{
"": "EndpointHints provides hints describing how an endpoint should be consumed.",
"forZones": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries.",
+ "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.",
}
func (EndpointHints) SwaggerDoc() map[string]string {
@@ -96,6 +97,15 @@ func (EndpointSliceList) SwaggerDoc() map[string]string {
return map_EndpointSliceList
}
+var map_ForNode = map[string]string{
+ "": "ForNode provides information about which nodes should consume this endpoint.",
+ "name": "name represents the name of the node.",
+}
+
+func (ForNode) SwaggerDoc() map[string]string {
+ return map_ForNode
+}
+
var map_ForZone = map[string]string{
"": "ForZone provides information about which zones should consume this endpoint.",
"name": "name represents the name of the zone.",
diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go
index 13b9544b0..72490d6ad 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go
@@ -114,6 +114,11 @@ func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
*out = make([]ForZone, len(*in))
copy(*out, *in)
}
+ if in.ForNodes != nil {
+ in, out := &in.ForNodes, &out.ForNodes
+ *out = make([]ForNode, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -236,6 +241,22 @@ func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForNode) DeepCopyInto(out *ForNode) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForNode.
+func (in *ForNode) DeepCopy() *ForNode {
+ if in == nil {
+ return nil
+ }
+ out := new(ForNode)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ForZone) DeepCopyInto(out *ForZone) {
*out = *in
diff --git a/vendor/k8s.io/api/events/v1/doc.go b/vendor/k8s.io/api/events/v1/doc.go
index 5fe700ffc..911639044 100644
--- a/vendor/k8s.io/api/events/v1/doc.go
+++ b/vendor/k8s.io/api/events/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=events.k8s.io
-package v1 // import "k8s.io/api/events/v1"
+package v1
diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go
index 46048a65b..e4864294f 100644
--- a/vendor/k8s.io/api/events/v1beta1/doc.go
+++ b/vendor/k8s.io/api/events/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=events.k8s.io
-package v1beta1 // import "k8s.io/api/events/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go
index c9af49d55..7770fab5d 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/doc.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
-package v1beta1 // import "k8s.io/api/extensions/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
index 818486f39..35b9a4ff2 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
@@ -1364,185 +1364,187 @@ func init() {
}
var fileDescriptor_90a532284de28347 = []byte{
- // 2842 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47,
- 0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3,
- 0x46, 0x84, 0x4d, 0xd8, 0x9d, 0x61, 0x37, 0xc9, 0x92, 0x0f, 0x29, 0x61, 0xc7, 0xbb, 0xc9, 0x3a,
- 0xb1, 0xc7, 0x93, 0x9a, 0x71, 0x82, 0x22, 0x02, 0xb4, 0x7b, 0xca, 0xe3, 0x8e, 0x7b, 0xba, 0x47,
- 0xdd, 0x35, 0x66, 0x7d, 0x03, 0xc1, 0x25, 0x27, 0xb8, 0x04, 0x38, 0x22, 0x21, 0x71, 0xe5, 0xca,
- 0x21, 0x44, 0x20, 0x82, 0xb4, 0x42, 0x1c, 0x22, 0x71, 0x20, 0x27, 0x8b, 0x38, 0x27, 0xc4, 0x3f,
- 0x80, 0xf6, 0x84, 0xea, 0xa3, 0xab, 0xbf, 0xed, 0x1e, 0xe3, 0x58, 0x04, 0x71, 0x5a, 0x4f, 0xbd,
- 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0xeb, 0xbb, 0xcf, 0xf9, 0x35,
- 0xcb, 0xad, 0x1b, 0x03, 0xab, 0x4e, 0xee, 0x53, 0xe2, 0xf8, 0x96, 0xeb, 0xf8, 0xf5, 0xbd, 0x1b,
- 0x5b, 0x84, 0x1a, 0x37, 0xea, 0x3d, 0xe2, 0x10, 0xcf, 0xa0, 0xa4, 0x5b, 0x1b, 0x78, 0x2e, 0x75,
- 0xd1, 0x63, 0x42, 0xbd, 0x66, 0x0c, 0xac, 0x5a, 0xa8, 0x5e, 0x93, 0xea, 0x8b, 0xd7, 0x7b, 0x16,
- 0xdd, 0x19, 0x6e, 0xd5, 0x4c, 0xb7, 0x5f, 0xef, 0xb9, 0x3d, 0xb7, 0xce, 0xad, 0xb6, 0x86, 0xdb,
- 0xfc, 0x17, 0xff, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0x7a, 0xa4, 0x73, 0xd3, 0xf5, 0x48, 0x7d, 0x2f,
- 0xd5, 0xe3, 0xe2, 0x33, 0xa1, 0x4e, 0xdf, 0x30, 0x77, 0x2c, 0x87, 0x78, 0xfb, 0xf5, 0xc1, 0x6e,
- 0x8f, 0x35, 0xf8, 0xf5, 0x3e, 0xa1, 0x46, 0x96, 0x55, 0x3d, 0xcf, 0xca, 0x1b, 0x3a, 0xd4, 0xea,
- 0x93, 0x94, 0xc1, 0xad, 0xe3, 0x0c, 0x7c, 0x73, 0x87, 0xf4, 0x8d, 0x94, 0xdd, 0xd3, 0x79, 0x76,
- 0x43, 0x6a, 0xd9, 0x75, 0xcb, 0xa1, 0x3e, 0xf5, 0x92, 0x46, 0xfa, 0xfb, 0x25, 0x98, 0xbc, 0x63,
- 0x90, 0xbe, 0xeb, 0xb4, 0x09, 0x45, 0xdf, 0x83, 0x2a, 0x1b, 0x46, 0xd7, 0xa0, 0xc6, 0x82, 0xf6,
- 0xb8, 0x76, 0x75, 0xea, 0xe6, 0xd7, 0x6b, 0xe1, 0x34, 0x2b, 0xd4, 0xda, 0x60, 0xb7, 0xc7, 0x1a,
- 0xfc, 0x1a, 0xd3, 0xae, 0xed, 0xdd, 0xa8, 0x6d, 0x6c, 0xbd, 0x4b, 0x4c, 0xba, 0x4e, 0xa8, 0xd1,
- 0x40, 0x0f, 0x0e, 0x96, 0xcf, 0x1d, 0x1e, 0x2c, 0x43, 0xd8, 0x86, 0x15, 0x2a, 0x6a, 0xc2, 0x98,
- 0x3f, 0x20, 0xe6, 0x42, 0x89, 0xa3, 0x5f, 0xab, 0x1d, 0xb9, 0x88, 0x35, 0xe5, 0x59, 0x7b, 0x40,
- 0xcc, 0xc6, 0x79, 0x89, 0x3c, 0xc6, 0x7e, 0x61, 0x8e, 0x83, 0xde, 0x84, 0x71, 0x9f, 0x1a, 0x74,
- 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, 0x1b,
- 0x4b, 0x34, 0xfd, 0x1f, 0x25, 0x40, 0x4a, 0x77, 0xc5, 0x75, 0xba, 0x16, 0xb5, 0x5c, 0x07, 0xbd,
- 0x00, 0x63, 0x74, 0x7f, 0x40, 0xf8, 0xe4, 0x4c, 0x36, 0x9e, 0x08, 0x1c, 0xea, 0xec, 0x0f, 0xc8,
- 0xc3, 0x83, 0xe5, 0xcb, 0x69, 0x0b, 0x26, 0xc1, 0xdc, 0x06, 0xad, 0x29, 0x57, 0x4b, 0xdc, 0xfa,
- 0x99, 0x78, 0xd7, 0x0f, 0x0f, 0x96, 0x33, 0x82, 0xb0, 0xa6, 0x90, 0xe2, 0x0e, 0xa2, 0x3d, 0x40,
- 0xb6, 0xe1, 0xd3, 0x8e, 0x67, 0x38, 0xbe, 0xe8, 0xc9, 0xea, 0x13, 0x39, 0x09, 0x4f, 0x15, 0x5b,
- 0x34, 0x66, 0xd1, 0x58, 0x94, 0x5e, 0xa0, 0xb5, 0x14, 0x1a, 0xce, 0xe8, 0x01, 0x3d, 0x01, 0xe3,
- 0x1e, 0x31, 0x7c, 0xd7, 0x59, 0x18, 0xe3, 0xa3, 0x50, 0x13, 0x88, 0x79, 0x2b, 0x96, 0x52, 0xf4,
- 0x24, 0x4c, 0xf4, 0x89, 0xef, 0x1b, 0x3d, 0xb2, 0x50, 0xe1, 0x8a, 0xb3, 0x52, 0x71, 0x62, 0x5d,
- 0x34, 0xe3, 0x40, 0xae, 0x7f, 0xa0, 0xc1, 0xb4, 0x9a, 0xb9, 0x35, 0xcb, 0xa7, 0xe8, 0xdb, 0xa9,
- 0x38, 0xac, 0x15, 0x1b, 0x12, 0xb3, 0xe6, 0x51, 0x78, 0x41, 0xf6, 0x56, 0x0d, 0x5a, 0x22, 0x31,
- 0xb8, 0x0e, 0x15, 0x8b, 0x92, 0x3e, 0x5b, 0x87, 0xf2, 0xd5, 0xa9, 0x9b, 0x57, 0x8b, 0x86, 0x4c,
- 0x63, 0x5a, 0x82, 0x56, 0x56, 0x99, 0x39, 0x16, 0x28, 0xfa, 0xcf, 0xc6, 0x22, 0xee, 0xb3, 0xd0,
- 0x44, 0xef, 0x40, 0xd5, 0x27, 0x36, 0x31, 0xa9, 0xeb, 0x49, 0xf7, 0x9f, 0x2e, 0xe8, 0xbe, 0xb1,
- 0x45, 0xec, 0xb6, 0x34, 0x6d, 0x9c, 0x67, 0xfe, 0x07, 0xbf, 0xb0, 0x82, 0x44, 0x6f, 0x40, 0x95,
- 0x92, 0xfe, 0xc0, 0x36, 0x28, 0x91, 0xfb, 0xe8, 0xcb, 0xd1, 0x21, 0xb0, 0xc8, 0x61, 0x60, 0x2d,
- 0xb7, 0xdb, 0x91, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0x7b, 0x30, 0x33,
- 0x1c, 0x74, 0x99, 0x26, 0x65, 0xd9, 0xa1, 0xb7, 0x2f, 0x23, 0xe9, 0x56, 0xd1, 0xb9, 0xd9, 0x8c,
- 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3, 0x7d, 0xcb,
- 0xc1, 0xc4, 0xe8, 0xee, 0xb7, 0x89, 0xe9, 0x3a, 0x5d, 0x9f, 0x87, 0x55, 0xa5, 0x31, 0x2f, 0x01,
- 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48, 0x6e, 0x96,
- 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x49, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06, 0x73, 0x1e,
- 0xd9, 0xb3, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0xfd, 0x35, 0xab, 0x6f, 0xd1, 0x85, 0x71,
- 0xee, 0xd3, 0xc2, 0xe1, 0xc1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0xe7, 0xe3, 0x30,
- 0x9b, 0xc8, 0x37, 0xe8, 0x4d, 0xb8, 0x6c, 0x0e, 0x3d, 0x8f, 0x38, 0xb4, 0x39, 0xec, 0x6f, 0x11,
- 0xaf, 0x6d, 0xee, 0x90, 0xee, 0xd0, 0x26, 0x5d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4, 0xf8, 0xf2,
- 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15, 0x66, 0x89,
- 0x63, 0xaa, 0x59, 0x68, 0xa6, 0x34, 0x70, 0x86, 0x15, 0xf3, 0xb1, 0x4b, 0x7c, 0xcb, 0x23, 0xdd,
- 0xa4, 0x8f, 0xe5, 0xb8, 0x8f, 0x77, 0x32, 0xb5, 0x70, 0x8e, 0x35, 0x7a, 0x16, 0xa6, 0x44, 0x6f,
- 0x7c, 0xfd, 0xe4, 0x42, 0x5f, 0x92, 0x60, 0x53, 0xcd, 0x50, 0x84, 0xa3, 0x7a, 0x6c, 0x68, 0xee,
- 0x96, 0x4f, 0xbc, 0x3d, 0xd2, 0xcd, 0x5f, 0xe0, 0x8d, 0x94, 0x06, 0xce, 0xb0, 0x62, 0x43, 0x13,
- 0x11, 0x98, 0x1a, 0xda, 0x78, 0x7c, 0x68, 0x9b, 0x99, 0x5a, 0x38, 0xc7, 0x9a, 0xc5, 0xb1, 0x70,
- 0xf9, 0xf6, 0x9e, 0x61, 0xd9, 0xc6, 0x96, 0x4d, 0x16, 0x26, 0xe2, 0x71, 0xdc, 0x8c, 0x8b, 0x71,
- 0x52, 0x1f, 0xbd, 0x0a, 0x17, 0x45, 0xd3, 0xa6, 0x63, 0x28, 0x90, 0x2a, 0x07, 0x79, 0x54, 0x82,
- 0x5c, 0x6c, 0x26, 0x15, 0x70, 0xda, 0x06, 0xbd, 0x00, 0x33, 0xa6, 0x6b, 0xdb, 0x3c, 0x1e, 0x57,
- 0xdc, 0xa1, 0x43, 0x17, 0x26, 0x39, 0x0a, 0x62, 0xfb, 0x71, 0x25, 0x26, 0xc1, 0x09, 0x4d, 0x44,
- 0x00, 0xcc, 0xa0, 0xe0, 0xf8, 0x0b, 0xc0, 0xf3, 0xe3, 0x8d, 0xa2, 0x39, 0x40, 0x95, 0xaa, 0x90,
- 0x03, 0xa8, 0x26, 0x1f, 0x47, 0x80, 0xf5, 0x3f, 0x6b, 0x30, 0x9f, 0x93, 0x3a, 0xd0, 0xcb, 0xb1,
- 0x12, 0xfb, 0xb5, 0x44, 0x89, 0xbd, 0x92, 0x63, 0x16, 0xa9, 0xb3, 0x0e, 0x4c, 0x7b, 0x6c, 0x54,
- 0x4e, 0x4f, 0xa8, 0xc8, 0x1c, 0xf9, 0xec, 0x31, 0xc3, 0xc0, 0x51, 0x9b, 0x30, 0xe7, 0x5f, 0x3c,
- 0x3c, 0x58, 0x9e, 0x8e, 0xc9, 0x70, 0x1c, 0x5e, 0xff, 0x45, 0x09, 0xe0, 0x0e, 0x19, 0xd8, 0xee,
- 0x7e, 0x9f, 0x38, 0x67, 0xc1, 0xa1, 0x36, 0x62, 0x1c, 0xea, 0xfa, 0x71, 0xcb, 0xa3, 0x5c, 0xcb,
- 0x25, 0x51, 0x6f, 0x25, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14,
- 0x2a, 0x87, 0x34, 0xea, 0xc5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x9f, 0x1b,
- 0x8f, 0x7a, 0x17, 0x66, 0x18, 0xcb, 0x11, 0x6b, 0xc9, 0x39, 0xd4, 0xf8, 0xc8, 0x1c, 0x4a, 0x55,
- 0xbb, 0xb5, 0x18, 0x12, 0x4e, 0x20, 0xe7, 0x70, 0xb6, 0x89, 0x2f, 0x22, 0x67, 0xfb, 0x50, 0x83,
- 0x99, 0x70, 0x99, 0xce, 0x80, 0xb4, 0x35, 0xe3, 0xa4, 0xed, 0xc9, 0xc2, 0x21, 0x9a, 0xc3, 0xda,
- 0xfe, 0xc5, 0x08, 0xbe, 0x52, 0x62, 0x1b, 0x7c, 0xcb, 0x30, 0x77, 0xd1, 0xe3, 0x30, 0xe6, 0x18,
- 0xfd, 0x20, 0x32, 0xd5, 0x66, 0x69, 0x1a, 0x7d, 0x82, 0xb9, 0x04, 0xbd, 0xaf, 0x01, 0x92, 0x55,
- 0xe0, 0xb6, 0xe3, 0xb8, 0xd4, 0x10, 0xb9, 0x52, 0xb8, 0xb5, 0x5a, 0xd8, 0xad, 0xa0, 0xc7, 0xda,
- 0x66, 0x0a, 0xeb, 0xae, 0x43, 0xbd, 0xfd, 0x70, 0x91, 0xd3, 0x0a, 0x38, 0xc3, 0x01, 0x64, 0x00,
- 0x78, 0x12, 0xb3, 0xe3, 0xca, 0x8d, 0x7c, 0xbd, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d,
- 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x85, 0xf9, 0x1c, 0x6f, 0xd1, 0x05,
- 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91,
- 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x42, 0xe9, 0x39, 0x4d, 0xff, 0xa0, 0x12, 0x8d, 0x1d, 0xce, 0x98,
- 0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3,
- 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0xcf, 0x97, 0x5b, 0x97, 0x4f, 0x87, 0x5b, 0x7f, 0x17, 0xaa, 0x7e,
- 0xc0, 0xaa, 0xc7, 0x38, 0xe4, 0x8d, 0x11, 0xf2, 0xab, 0x24, 0xd4, 0xaa, 0x03, 0x45, 0xa5, 0x15,
- 0x68, 0x16, 0x89, 0xae, 0x8c, 0x48, 0xa2, 0x4f, 0x95, 0xf8, 0xb2, 0x7c, 0x33, 0x30, 0x86, 0x3e,
- 0xe9, 0xf2, 0xdc, 0x56, 0x0d, 0xf3, 0x4d, 0x8b, 0xb7, 0x62, 0x29, 0x45, 0xef, 0xc4, 0x42, 0xb6,
- 0x7a, 0x92, 0x90, 0x9d, 0xc9, 0x0f, 0x57, 0xb4, 0x09, 0xf3, 0x03, 0xcf, 0xed, 0x79, 0xc4, 0xf7,
- 0xef, 0x10, 0xa3, 0x6b, 0x5b, 0x0e, 0x09, 0xe6, 0x47, 0x30, 0xa2, 0x2b, 0x87, 0x07, 0xcb, 0xf3,
- 0xad, 0x6c, 0x15, 0x9c, 0x67, 0xab, 0x3f, 0x18, 0x83, 0x0b, 0xc9, 0x0a, 0x98, 0x43, 0x52, 0xb5,
- 0x13, 0x91, 0xd4, 0x6b, 0x91, 0xcd, 0x20, 0x18, 0xbc, 0x5a, 0xfd, 0x8c, 0x0d, 0x71, 0x1b, 0x66,
- 0x65, 0x36, 0x08, 0x84, 0x92, 0xa6, 0xab, 0xd5, 0xdf, 0x8c, 0x8b, 0x71, 0x52, 0x1f, 0xbd, 0x08,
- 0xd3, 0x1e, 0xe7, 0xdd, 0x01, 0x80, 0xe0, 0xae, 0x8f, 0x48, 0x80, 0x69, 0x1c, 0x15, 0xe2, 0xb8,
- 0x2e, 0xe3, 0xad, 0x21, 0x1d, 0x0d, 0x00, 0xc6, 0xe2, 0xbc, 0xf5, 0x76, 0x52, 0x01, 0xa7, 0x6d,
- 0xd0, 0x3a, 0x5c, 0x1a, 0x3a, 0x69, 0x28, 0x11, 0xca, 0x57, 0x24, 0xd4, 0xa5, 0xcd, 0xb4, 0x0a,
- 0xce, 0xb2, 0x43, 0xdb, 0x31, 0x2a, 0x3b, 0xce, 0xd3, 0xf3, 0xcd, 0xc2, 0x1b, 0xaf, 0x30, 0x97,
- 0xcd, 0xa0, 0xdb, 0xd5, 0xa2, 0x74, 0x5b, 0xff, 0x83, 0x16, 0x2d, 0x42, 0x8a, 0x02, 0x1f, 0x77,
- 0xcb, 0x94, 0xb2, 0x88, 0xb0, 0x23, 0x37, 0x9b, 0xfd, 0xde, 0x1a, 0x89, 0xfd, 0x86, 0xc5, 0xf3,
- 0x78, 0xfa, 0xfb, 0x47, 0x0d, 0x66, 0xef, 0x75, 0x3a, 0xad, 0x55, 0x87, 0xef, 0x96, 0x96, 0x41,
- 0x77, 0x58, 0x15, 0x1d, 0x18, 0x74, 0x27, 0x59, 0x45, 0x99, 0x0c, 0x73, 0x09, 0x7a, 0x06, 0xaa,
- 0xec, 0x5f, 0xe6, 0x38, 0x0f, 0xd7, 0x49, 0x9e, 0x64, 0xaa, 0x2d, 0xd9, 0xf6, 0x30, 0xf2, 0x37,
- 0x56, 0x9a, 0xe8, 0x5b, 0x30, 0xc1, 0xf6, 0x36, 0x71, 0xba, 0x05, 0xc9, 0xaf, 0x74, 0xaa, 0x21,
- 0x8c, 0x42, 0x3e, 0x23, 0x1b, 0x70, 0x00, 0xa7, 0xef, 0xc2, 0x5c, 0x64, 0x10, 0x78, 0x68, 0x93,
- 0x37, 0x59, 0xbd, 0x42, 0x6d, 0xa8, 0xb0, 0xde, 0x59, 0x55, 0x2a, 0x17, 0xb8, 0x5e, 0x4c, 0x4c,
- 0x44, 0xc8, 0x3d, 0xd8, 0x2f, 0x1f, 0x0b, 0x2c, 0x7d, 0x03, 0x26, 0x56, 0x5b, 0x0d, 0xdb, 0x15,
- 0x7c, 0xc3, 0xb4, 0xba, 0x5e, 0x72, 0xa6, 0x56, 0x56, 0xef, 0x60, 0xcc, 0x25, 0x48, 0x87, 0x71,
- 0x72, 0xdf, 0x24, 0x03, 0xca, 0x29, 0xc6, 0x64, 0x03, 0x58, 0x22, 0xbd, 0xcb, 0x5b, 0xb0, 0x94,
- 0xe8, 0x3f, 0x29, 0xc1, 0x84, 0xec, 0xf6, 0x0c, 0xce, 0x1f, 0x6b, 0xb1, 0xf3, 0xc7, 0x53, 0xc5,
- 0x96, 0x20, 0xf7, 0xf0, 0xd1, 0x49, 0x1c, 0x3e, 0xae, 0x15, 0xc4, 0x3b, 0xfa, 0xe4, 0xf1, 0x5e,
- 0x09, 0x66, 0xe2, 0x8b, 0x8f, 0x9e, 0x85, 0x29, 0x96, 0x6a, 0x2d, 0x93, 0x34, 0x43, 0x86, 0xa7,
- 0xae, 0x1f, 0xda, 0xa1, 0x08, 0x47, 0xf5, 0x50, 0x4f, 0x99, 0xb5, 0x5c, 0x8f, 0xca, 0x41, 0xe7,
- 0x4f, 0xe9, 0x90, 0x5a, 0x76, 0x4d, 0x5c, 0xb6, 0xd7, 0x56, 0x1d, 0xba, 0xe1, 0xb5, 0xa9, 0x67,
- 0x39, 0xbd, 0x54, 0x47, 0x0c, 0x0c, 0x47, 0x91, 0xd1, 0x5b, 0x2c, 0xed, 0xfb, 0xee, 0xd0, 0x33,
- 0x49, 0x16, 0x7d, 0x0b, 0xa8, 0x07, 0xdb, 0x08, 0xdd, 0x35, 0xd7, 0x34, 0x6c, 0xb1, 0x38, 0x98,
- 0x6c, 0x13, 0x8f, 0x38, 0x26, 0x09, 0x28, 0x93, 0x80, 0xc0, 0x0a, 0x4c, 0xff, 0xad, 0x06, 0x53,
- 0x72, 0x2e, 0xce, 0x80, 0xa8, 0xbf, 0x1e, 0x27, 0xea, 0x4f, 0x14, 0xdc, 0xa1, 0xd9, 0x2c, 0xfd,
- 0x77, 0x1a, 0x2c, 0x06, 0xae, 0xbb, 0x46, 0xb7, 0x61, 0xd8, 0x86, 0x63, 0x12, 0x2f, 0x88, 0xf5,
- 0x45, 0x28, 0x59, 0x03, 0xb9, 0x92, 0x20, 0x01, 0x4a, 0xab, 0x2d, 0x5c, 0xb2, 0x06, 0xac, 0x8a,
- 0xee, 0xb8, 0x3e, 0xe5, 0x6c, 0x5e, 0x1c, 0x14, 0x95, 0xd7, 0xf7, 0x64, 0x3b, 0x56, 0x1a, 0x68,
- 0x13, 0x2a, 0x03, 0xd7, 0xa3, 0xac, 0x72, 0x95, 0x13, 0xeb, 0x7b, 0x84, 0xd7, 0x6c, 0xdd, 0x64,
- 0x20, 0x86, 0x3b, 0x9d, 0xc1, 0x60, 0x81, 0xa6, 0xff, 0x50, 0x83, 0x47, 0x33, 0xfc, 0x97, 0xa4,
- 0xa1, 0x0b, 0x13, 0x96, 0x10, 0xca, 0xf4, 0xf2, 0x7c, 0xb1, 0x6e, 0x33, 0xa6, 0x22, 0x4c, 0x6d,
- 0x41, 0x0a, 0x0b, 0xa0, 0xf5, 0x5f, 0x69, 0x70, 0x31, 0xe5, 0x2f, 0x4f, 0xd1, 0x2c, 0x9e, 0x25,
- 0xdb, 0x56, 0x29, 0x9a, 0x85, 0x25, 0x97, 0xa0, 0xd7, 0xa1, 0xca, 0xdf, 0x88, 0x4c, 0xd7, 0x96,
- 0x13, 0x58, 0x0f, 0x26, 0xb0, 0x25, 0xdb, 0x1f, 0x1e, 0x2c, 0x5f, 0xc9, 0x38, 0x6b, 0x07, 0x62,
- 0xac, 0x00, 0xd0, 0x32, 0x54, 0x88, 0xe7, 0xb9, 0x9e, 0x4c, 0xf6, 0x93, 0x6c, 0xa6, 0xee, 0xb2,
- 0x06, 0x2c, 0xda, 0xf5, 0x5f, 0x87, 0x41, 0xca, 0xb2, 0x2f, 0xf3, 0x8f, 0x2d, 0x4e, 0x32, 0x31,
- 0xb2, 0xa5, 0xc3, 0x5c, 0x82, 0x86, 0x70, 0xc1, 0x4a, 0xa4, 0x6b, 0xb9, 0x3b, 0xeb, 0xc5, 0xa6,
- 0x51, 0x99, 0x35, 0x16, 0x24, 0xfc, 0x85, 0xa4, 0x04, 0xa7, 0xba, 0xd0, 0x09, 0xa4, 0xb4, 0xd0,
- 0x1b, 0x30, 0xb6, 0x43, 0xe9, 0x20, 0xe3, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, 0xa3,
- 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2f, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0x6f, 0xaa, 0xd1,
- 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, 0xb4,
- 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0xf7, 0xa0, 0x4c, 0xed,
- 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, 0xcc,
- 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, 0xc3,
- 0xcd, 0xc7, 0x7e, 0xf9, 0x58, 0xe0, 0xe8, 0x3f, 0xd2, 0x60, 0x3a, 0x56, 0x2d, 0x90, 0x07, 0xe7,
- 0xed, 0xc8, 0xde, 0x91, 0xf3, 0xf0, 0xdc, 0xe8, 0xbb, 0x4e, 0x6e, 0xfa, 0x39, 0xd9, 0xef, 0xf9,
- 0xa8, 0x0c, 0xc7, 0xfa, 0xd0, 0x0d, 0x80, 0x70, 0xd8, 0x6c, 0x1f, 0xb0, 0xe0, 0x15, 0x1b, 0x5e,
- 0xee, 0x03, 0x16, 0xd3, 0x3e, 0x16, 0xed, 0xe8, 0x26, 0x80, 0x4f, 0x4c, 0x8f, 0xd0, 0x66, 0x98,
- 0xb8, 0x54, 0x39, 0x6e, 0x2b, 0x09, 0x8e, 0x68, 0xe9, 0x7f, 0xd2, 0x60, 0xba, 0x49, 0xe8, 0xf7,
- 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, 0xf9,
- 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0, 0x3f, 0xd4, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a,
- 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c, 0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d,
- 0x19, 0xbd, 0xa3, 0x61, 0x12, 0xe2, 0x85, 0x35, 0xa7, 0xe3, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c,
- 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x73, 0x1a, 0x01, 0x86, 0xb1, 0x6d, 0xcf, 0xed, 0x9f, 0x78, 0x0c,
- 0x6a, 0x21, 0x5e, 0xf1, 0xdc, 0x3e, 0xe6, 0x58, 0xfa, 0x47, 0x1a, 0x5c, 0x8c, 0x69, 0x9e, 0x01,
- 0x6f, 0x78, 0x23, 0xce, 0x1b, 0xae, 0x8d, 0x32, 0x90, 0x1c, 0xf6, 0xf0, 0x51, 0x29, 0x31, 0x0c,
- 0x36, 0x60, 0xb4, 0x0d, 0x53, 0x03, 0xb7, 0xdb, 0x3e, 0x85, 0x07, 0xda, 0x59, 0xc6, 0xe7, 0x5a,
- 0x21, 0x16, 0x8e, 0x02, 0xa3, 0xfb, 0x70, 0x91, 0x51, 0x0b, 0x7f, 0x60, 0x98, 0xa4, 0x7d, 0x0a,
- 0x57, 0x56, 0x8f, 0xf0, 0x17, 0xa0, 0x24, 0x22, 0x4e, 0x77, 0x82, 0xd6, 0x61, 0xc2, 0x1a, 0xf0,
- 0xf3, 0x85, 0x24, 0x92, 0xc7, 0x92, 0x30, 0x71, 0x1a, 0x11, 0x29, 0x5e, 0xfe, 0xc0, 0x01, 0x86,
- 0xfe, 0xd7, 0x64, 0x34, 0x70, 0xba, 0xfa, 0x6a, 0x84, 0x1e, 0xc8, 0xb7, 0x9a, 0x93, 0x51, 0x83,
- 0xa6, 0x64, 0x22, 0x27, 0x65, 0xd6, 0xd5, 0x04, 0x6f, 0xf9, 0x0a, 0x4c, 0x10, 0xa7, 0xcb, 0xc9,
- 0xba, 0xb8, 0x08, 0xe1, 0xa3, 0xba, 0x2b, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x5c, 0x4e, 0x8c, 0x8a,
- 0x97, 0xd9, 0x77, 0x4f, 0x2d, 0x38, 0x14, 0xe1, 0xcf, 0x0d, 0x90, 0xad, 0x90, 0xfe, 0x89, 0x98,
- 0xff, 0xc6, 0x28, 0x31, 0x1f, 0xad, 0x7f, 0xb9, 0xe4, 0x0f, 0x7d, 0x07, 0xc6, 0x89, 0xe8, 0x42,
- 0x54, 0xd5, 0x5b, 0xa3, 0x74, 0x11, 0xa6, 0xdf, 0xf0, 0x9c, 0x25, 0xdb, 0x24, 0x2a, 0x7a, 0x99,
- 0xcd, 0x17, 0xd3, 0x65, 0xc7, 0x12, 0xc1, 0x9e, 0x27, 0x1b, 0x8f, 0x89, 0x61, 0xab, 0xe6, 0x87,
- 0x07, 0xcb, 0x10, 0xfe, 0xc4, 0x51, 0x0b, 0xfe, 0x7a, 0x26, 0xef, 0x6c, 0xce, 0xe6, 0x0b, 0xa4,
- 0xd1, 0x5e, 0xcf, 0x42, 0xd7, 0x4e, 0xed, 0xf5, 0x2c, 0x02, 0x79, 0xf4, 0x19, 0xf6, 0x9f, 0x25,
- 0xb8, 0x14, 0x2a, 0x17, 0x7e, 0x3d, 0xcb, 0x30, 0xf9, 0xff, 0x57, 0x48, 0xc5, 0x5e, 0xb4, 0xc2,
- 0xa9, 0xfb, 0xef, 0x7b, 0xd1, 0x0a, 0x7d, 0xcb, 0xa9, 0x76, 0xbf, 0x29, 0x45, 0x07, 0x30, 0xe2,
- 0xb3, 0xca, 0x29, 0x7c, 0x88, 0xf3, 0x85, 0x7b, 0x99, 0xd1, 0xff, 0x52, 0x86, 0x0b, 0xc9, 0xdd,
- 0x18, 0xbb, 0x7d, 0xd7, 0x8e, 0xbd, 0x7d, 0x6f, 0xc1, 0xdc, 0xf6, 0xd0, 0xb6, 0xf7, 0xf9, 0x18,
- 0x22, 0x57, 0xf0, 0xe2, 0xde, 0xfe, 0x4b, 0xd2, 0x72, 0xee, 0x95, 0x0c, 0x1d, 0x9c, 0x69, 0x99,
- 0xbe, 0x8c, 0x1f, 0xfb, 0x4f, 0x2f, 0xe3, 0x2b, 0x27, 0xb8, 0x8c, 0xcf, 0x7e, 0xcf, 0x28, 0x9f,
- 0xe8, 0x3d, 0xe3, 0x24, 0x37, 0xf1, 0x19, 0x49, 0xec, 0xd8, 0xaf, 0x4a, 0x5e, 0x82, 0x99, 0xf8,
- 0xeb, 0x90, 0x58, 0x4b, 0xf1, 0x40, 0x25, 0xdf, 0x62, 0x22, 0x6b, 0x29, 0xda, 0xb1, 0xd2, 0xd0,
- 0x0f, 0x35, 0xb8, 0x9c, 0xfd, 0x15, 0x08, 0xb2, 0x61, 0xa6, 0x6f, 0xdc, 0x8f, 0x7e, 0x99, 0xa3,
- 0x9d, 0x90, 0xad, 0xf0, 0x67, 0x81, 0xf5, 0x18, 0x16, 0x4e, 0x60, 0xa3, 0xb7, 0xa1, 0xda, 0x37,
- 0xee, 0xb7, 0x87, 0x5e, 0x8f, 0x9c, 0x98, 0x15, 0xf1, 0x6d, 0xb4, 0x2e, 0x51, 0xb0, 0xc2, 0xd3,
- 0x3f, 0xd3, 0x60, 0x3e, 0xe7, 0xb2, 0xff, 0x7f, 0x68, 0x94, 0xef, 0x95, 0xa0, 0xd2, 0x36, 0x0d,
- 0x9b, 0x9c, 0x01, 0xa1, 0x78, 0x2d, 0x46, 0x28, 0x8e, 0xfb, 0x9a, 0x94, 0x7b, 0x95, 0xcb, 0x25,
- 0x70, 0x82, 0x4b, 0x3c, 0x55, 0x08, 0xed, 0x68, 0x1a, 0xf1, 0x3c, 0x4c, 0xaa, 0x4e, 0x47, 0xcb,
- 0x6e, 0xfa, 0x2f, 0x4b, 0x30, 0x15, 0xe9, 0x62, 0xc4, 0xdc, 0xb8, 0x1d, 0x2b, 0x08, 0xe5, 0x02,
- 0x37, 0x2d, 0x91, 0xbe, 0x6a, 0x41, 0x09, 0x10, 0x5f, 0x43, 0x84, 0xef, 0xdf, 0xe9, 0xca, 0xf0,
- 0x12, 0xcc, 0x50, 0xc3, 0xeb, 0x11, 0xaa, 0x68, 0xbb, 0xb8, 0x64, 0x54, 0x9f, 0xe5, 0x74, 0x62,
- 0x52, 0x9c, 0xd0, 0x5e, 0x7c, 0x11, 0xa6, 0x63, 0x9d, 0x8d, 0xf2, 0x31, 0x43, 0x63, 0xe5, 0xc1,
- 0xa7, 0x4b, 0xe7, 0x3e, 0xfe, 0x74, 0xe9, 0xdc, 0x27, 0x9f, 0x2e, 0x9d, 0xfb, 0xc1, 0xe1, 0x92,
- 0xf6, 0xe0, 0x70, 0x49, 0xfb, 0xf8, 0x70, 0x49, 0xfb, 0xe4, 0x70, 0x49, 0xfb, 0xfb, 0xe1, 0x92,
- 0xf6, 0xd3, 0xcf, 0x96, 0xce, 0xbd, 0xfd, 0xd8, 0x91, 0xff, 0xb7, 0xe1, 0xdf, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x5f, 0xd8, 0x14, 0x50, 0xfb, 0x30, 0x00, 0x00,
+ // 2875 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47,
+ 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76,
+ 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x77, 0x86, 0xdd, 0x24, 0x4b, 0x7e, 0x48, 0x09, 0x3b, 0xde, 0x4d,
+ 0xd6, 0x89, 0x7f, 0x4c, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9,
+ 0x1e, 0x75, 0xd7, 0x98, 0xf5, 0x0d, 0x04, 0x97, 0x9c, 0x40, 0x42, 0x21, 0x1c, 0x91, 0x90, 0xb8,
+ 0x72, 0xe5, 0x10, 0x22, 0x10, 0x41, 0x8a, 0x38, 0x45, 0xe2, 0x40, 0x4e, 0x16, 0x71, 0x4e, 0x88,
+ 0x7f, 0x00, 0xed, 0x09, 0xd5, 0x8f, 0xae, 0xfe, 0x6d, 0xf7, 0x0c, 0x5e, 0x8b, 0x20, 0x4e, 0xeb,
+ 0xa9, 0xf7, 0xde, 0xa7, 0x5e, 0x55, 0xbd, 0x7a, 0xef, 0x53, 0x55, 0xbd, 0x70, 0x7d, 0xef, 0x39,
+ 0xbf, 0x66, 0xb9, 0x75, 0xa3, 0x6f, 0xd5, 0xc9, 0x7d, 0x4a, 0x1c, 0xdf, 0x72, 0x1d, 0xbf, 0xbe,
+ 0x7f, 0x63, 0x9b, 0x50, 0xe3, 0x46, 0xbd, 0x4b, 0x1c, 0xe2, 0x19, 0x94, 0x74, 0x6a, 0x7d, 0xcf,
+ 0xa5, 0x2e, 0x7a, 0x4c, 0xa8, 0xd7, 0x8c, 0xbe, 0x55, 0x0b, 0xd5, 0x6b, 0x52, 0x7d, 0xf1, 0x7a,
+ 0xd7, 0xa2, 0xbb, 0x83, 0xed, 0x9a, 0xe9, 0xf6, 0xea, 0x5d, 0xb7, 0xeb, 0xd6, 0xb9, 0xd5, 0xf6,
+ 0x60, 0x87, 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x02, 0x6d, 0x51, 0x8f, 0x74, 0x6e, 0xba, 0x1e, 0xa9,
+ 0xef, 0xa7, 0x7a, 0x5c, 0x7c, 0x26, 0xd4, 0xe9, 0x19, 0xe6, 0xae, 0xe5, 0x10, 0xef, 0xa0, 0xde,
+ 0xdf, 0xeb, 0xb2, 0x06, 0xbf, 0xde, 0x23, 0xd4, 0xc8, 0xb2, 0xaa, 0xe7, 0x59, 0x79, 0x03, 0x87,
+ 0x5a, 0x3d, 0x92, 0x32, 0xb8, 0x75, 0x92, 0x81, 0x6f, 0xee, 0x92, 0x9e, 0x91, 0xb2, 0x7b, 0x3a,
+ 0xcf, 0x6e, 0x40, 0x2d, 0xbb, 0x6e, 0x39, 0xd4, 0xa7, 0x5e, 0xd2, 0x48, 0x7f, 0xbf, 0x04, 0x93,
+ 0x77, 0x0c, 0xd2, 0x73, 0x9d, 0x16, 0xa1, 0xe8, 0x7b, 0x50, 0x65, 0xc3, 0xe8, 0x18, 0xd4, 0x58,
+ 0xd0, 0x1e, 0xd7, 0xae, 0x4e, 0xdd, 0xfc, 0x7a, 0x2d, 0x9c, 0x66, 0x85, 0x5a, 0xeb, 0xef, 0x75,
+ 0x59, 0x83, 0x5f, 0x63, 0xda, 0xb5, 0xfd, 0x1b, 0xb5, 0xcd, 0xed, 0x77, 0x89, 0x49, 0xd7, 0x09,
+ 0x35, 0x1a, 0xe8, 0x93, 0xc3, 0xe5, 0x73, 0x47, 0x87, 0xcb, 0x10, 0xb6, 0x61, 0x85, 0x8a, 0x36,
+ 0x60, 0xcc, 0xef, 0x13, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5d, 0xc4, 0x9a, 0xf2, 0xac,
+ 0xd5, 0x27, 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc2, 0xb8, 0x4f,
+ 0x0d, 0x3a, 0xf0, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8,
+ 0xf8, 0x8d, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x1d, 0x8b, 0x5a, 0xae,
+ 0x83, 0x5e, 0x80, 0x31, 0x7a, 0xd0, 0x27, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0xb5, 0x0f,
+ 0xfa, 0xe4, 0xc1, 0xe1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25,
+ 0x6e, 0xfd, 0x4c, 0xbc, 0xeb, 0x07, 0x87, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1,
+ 0x3e, 0x20, 0xdb, 0xf0, 0x69, 0xdb, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x88, 0x9c, 0x84, 0xa7,
+ 0x8a, 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e,
+ 0x80, 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b,
+ 0x29, 0x7a, 0x12, 0x26, 0x7a, 0xc4, 0xf7, 0x8d, 0x2e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38,
+ 0xb1, 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4,
+ 0xed, 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d,
+ 0x91, 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x8f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0xcd, 0xab, 0x45,
+ 0x43, 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xe7, 0x63, 0x11, 0xf7,
+ 0x59, 0x68, 0xa2, 0x77, 0xa0, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x17, 0x74,
+ 0xdf, 0xd8, 0x26, 0x76, 0x4b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37,
+ 0xa0, 0x4a, 0x49, 0xaf, 0x6f, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xff, 0xd1, 0x21, 0xb0, 0xc8, 0x61,
+ 0x60, 0x4d, 0xb7, 0xd3, 0x96, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0xfb,
+ 0x30, 0x33, 0xe8, 0x77, 0x98, 0x26, 0x65, 0xd9, 0xa1, 0x7b, 0x20, 0x23, 0xe9, 0x56, 0xd1, 0xb9,
+ 0xd9, 0x8a, 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3,
+ 0x3d, 0xcb, 0xc1, 0xc4, 0xe8, 0x1c, 0xb4, 0x88, 0xe9, 0x3a, 0x1d, 0x9f, 0x87, 0x55, 0xa5, 0x31,
+ 0x2f, 0x01, 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48,
+ 0x6e, 0x96, 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x4e, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06,
+ 0x73, 0x1e, 0xd9, 0xb7, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0x83, 0x35, 0xab, 0x67, 0xd1,
+ 0x85, 0x71, 0xee, 0xd3, 0xc2, 0xd1, 0xe1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0x83,
+ 0x71, 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x26, 0x5c, 0x36, 0x07, 0x9e, 0x47, 0x1c, 0xba, 0x31, 0xe8,
+ 0x6d, 0x13, 0xaf, 0x65, 0xee, 0x92, 0xce, 0xc0, 0x26, 0x1d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4,
+ 0xf8, 0xf2, 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15,
+ 0x66, 0x89, 0x63, 0xaa, 0x59, 0xd8, 0x48, 0x69, 0xe0, 0x0c, 0x2b, 0xe6, 0x63, 0x87, 0xf8, 0x96,
+ 0x47, 0x3a, 0x49, 0x1f, 0xcb, 0x71, 0x1f, 0xef, 0x64, 0x6a, 0xe1, 0x1c, 0x6b, 0xf4, 0x2c, 0x4c,
+ 0x89, 0xde, 0xf8, 0xfa, 0xc9, 0x85, 0xbe, 0x24, 0xc1, 0xa6, 0x36, 0x42, 0x11, 0x8e, 0xea, 0xb1,
+ 0xa1, 0xb9, 0xdb, 0x3e, 0xf1, 0xf6, 0x49, 0x27, 0x7f, 0x81, 0x37, 0x53, 0x1a, 0x38, 0xc3, 0x8a,
+ 0x0d, 0x4d, 0x44, 0x60, 0x6a, 0x68, 0xe3, 0xf1, 0xa1, 0x6d, 0x65, 0x6a, 0xe1, 0x1c, 0x6b, 0x16,
+ 0xc7, 0xc2, 0xe5, 0xdb, 0xfb, 0x86, 0x65, 0x1b, 0xdb, 0x36, 0x59, 0x98, 0x88, 0xc7, 0xf1, 0x46,
+ 0x5c, 0x8c, 0x93, 0xfa, 0xe8, 0x55, 0xb8, 0x28, 0x9a, 0xb6, 0x1c, 0x43, 0x81, 0x54, 0x39, 0xc8,
+ 0xa3, 0x12, 0xe4, 0xe2, 0x46, 0x52, 0x01, 0xa7, 0x6d, 0xd0, 0x0b, 0x30, 0x63, 0xba, 0xb6, 0xcd,
+ 0xe3, 0x71, 0xc5, 0x1d, 0x38, 0x74, 0x61, 0x92, 0xa3, 0x20, 0xb6, 0x1f, 0x57, 0x62, 0x12, 0x9c,
+ 0xd0, 0x44, 0x04, 0xc0, 0x0c, 0x0a, 0x8e, 0xbf, 0x00, 0x3c, 0x3f, 0xde, 0x28, 0x9a, 0x03, 0x54,
+ 0xa9, 0x0a, 0x39, 0x80, 0x6a, 0xf2, 0x71, 0x04, 0x58, 0xff, 0xb3, 0x06, 0xf3, 0x39, 0xa9, 0x03,
+ 0xbd, 0x1c, 0x2b, 0xb1, 0x5f, 0x4b, 0x94, 0xd8, 0x2b, 0x39, 0x66, 0x91, 0x3a, 0xeb, 0xc0, 0xb4,
+ 0xc7, 0x46, 0xe5, 0x74, 0x85, 0x8a, 0xcc, 0x91, 0xcf, 0x9e, 0x30, 0x0c, 0x1c, 0xb5, 0x09, 0x73,
+ 0xfe, 0xc5, 0xa3, 0xc3, 0xe5, 0xe9, 0x98, 0x0c, 0xc7, 0xe1, 0xf5, 0x5f, 0x94, 0x00, 0xee, 0x90,
+ 0xbe, 0xed, 0x1e, 0xf4, 0x88, 0x73, 0x16, 0x1c, 0x6a, 0x33, 0xc6, 0xa1, 0xae, 0x9f, 0xb4, 0x3c,
+ 0xca, 0xb5, 0x5c, 0x12, 0xf5, 0x56, 0x82, 0x44, 0xd5, 0x8b, 0x43, 0x1e, 0xcf, 0xa2, 0xfe, 0x5a,
+ 0x86, 0x4b, 0xa1, 0x72, 0x48, 0xa3, 0x5e, 0x8c, 0xad, 0xf1, 0x57, 0x13, 0x6b, 0x3c, 0x9f, 0x61,
+ 0xf2, 0xd0, 0x78, 0xd4, 0xbb, 0x30, 0xc3, 0x58, 0x8e, 0x58, 0x4b, 0xce, 0xa1, 0xc6, 0x87, 0xe6,
+ 0x50, 0xaa, 0xda, 0xad, 0xc5, 0x90, 0x70, 0x02, 0x39, 0x87, 0xb3, 0x4d, 0x7c, 0x19, 0x39, 0xdb,
+ 0x47, 0x1a, 0xcc, 0x84, 0xcb, 0x74, 0x06, 0xa4, 0x6d, 0x23, 0x4e, 0xda, 0x9e, 0x2c, 0x1c, 0xa2,
+ 0x39, 0xac, 0xed, 0x9f, 0x8c, 0xe0, 0x2b, 0x25, 0xb6, 0xc1, 0xb7, 0x0d, 0x73, 0x0f, 0x3d, 0x0e,
+ 0x63, 0x8e, 0xd1, 0x0b, 0x22, 0x53, 0x6d, 0x96, 0x0d, 0xa3, 0x47, 0x30, 0x97, 0xa0, 0xf7, 0x35,
+ 0x40, 0xb2, 0x0a, 0xdc, 0x76, 0x1c, 0x97, 0x1a, 0x22, 0x57, 0x0a, 0xb7, 0x56, 0x0b, 0xbb, 0x15,
+ 0xf4, 0x58, 0xdb, 0x4a, 0x61, 0xdd, 0x75, 0xa8, 0x77, 0x10, 0x2e, 0x72, 0x5a, 0x01, 0x67, 0x38,
+ 0x80, 0x0c, 0x00, 0x4f, 0x62, 0xb6, 0x5d, 0xb9, 0x91, 0xaf, 0x17, 0xc8, 0x79, 0xcc, 0x60, 0xc5,
+ 0x75, 0x76, 0xac, 0x6e, 0x98, 0x76, 0xb0, 0x02, 0xc2, 0x11, 0xd0, 0xc5, 0xbb, 0x30, 0x9f, 0xe3,
+ 0x2d, 0xba, 0x00, 0xe5, 0x3d, 0x72, 0x20, 0xa6, 0x0d, 0xb3, 0x3f, 0xd1, 0x1c, 0x54, 0xf6, 0x0d,
+ 0x7b, 0x20, 0xd2, 0xef, 0x24, 0x16, 0x3f, 0x5e, 0x28, 0x3d, 0xa7, 0xe9, 0x1f, 0x56, 0xa2, 0xb1,
+ 0xc3, 0x19, 0xf3, 0x55, 0xa8, 0x7a, 0xa4, 0x6f, 0x5b, 0xa6, 0xe1, 0x4b, 0x22, 0xc4, 0xc9, 0x2f,
+ 0x96, 0x6d, 0x58, 0x49, 0x63, 0xdc, 0xba, 0xf4, 0x70, 0xb9, 0x75, 0xf9, 0x74, 0xb8, 0xf5, 0x77,
+ 0xa1, 0xea, 0x07, 0xac, 0x7a, 0x8c, 0x43, 0xde, 0x18, 0x22, 0xbf, 0x4a, 0x42, 0xad, 0x3a, 0x50,
+ 0x54, 0x5a, 0x81, 0x66, 0x91, 0xe8, 0xca, 0x90, 0x24, 0xfa, 0x54, 0x89, 0x2f, 0xcb, 0x37, 0x7d,
+ 0x63, 0xe0, 0x93, 0x0e, 0xcf, 0x6d, 0xd5, 0x30, 0xdf, 0x34, 0x79, 0x2b, 0x96, 0x52, 0xf4, 0x4e,
+ 0x2c, 0x64, 0xab, 0xa3, 0x84, 0xec, 0x4c, 0x7e, 0xb8, 0xa2, 0x2d, 0x98, 0xef, 0x7b, 0x6e, 0xd7,
+ 0x23, 0xbe, 0x7f, 0x87, 0x18, 0x1d, 0xdb, 0x72, 0x48, 0x30, 0x3f, 0x82, 0x11, 0x5d, 0x39, 0x3a,
+ 0x5c, 0x9e, 0x6f, 0x66, 0xab, 0xe0, 0x3c, 0x5b, 0xfd, 0x67, 0x15, 0xb8, 0x90, 0xac, 0x80, 0x39,
+ 0x24, 0x55, 0x1b, 0x89, 0xa4, 0x5e, 0x8b, 0x6c, 0x06, 0xc1, 0xe0, 0xd5, 0xea, 0x67, 0x6c, 0x88,
+ 0xdb, 0x30, 0x2b, 0xb3, 0x41, 0x20, 0x94, 0x34, 0x5d, 0xad, 0xfe, 0x56, 0x5c, 0x8c, 0x93, 0xfa,
+ 0xe8, 0x45, 0x98, 0xf6, 0x38, 0xef, 0x0e, 0x00, 0x04, 0x77, 0x7d, 0x44, 0x02, 0x4c, 0xe3, 0xa8,
+ 0x10, 0xc7, 0x75, 0x19, 0x6f, 0x0d, 0xe9, 0x68, 0x00, 0x30, 0x16, 0xe7, 0xad, 0xb7, 0x93, 0x0a,
+ 0x38, 0x6d, 0x83, 0xd6, 0xe1, 0xd2, 0xc0, 0x49, 0x43, 0x89, 0x50, 0xbe, 0x22, 0xa1, 0x2e, 0x6d,
+ 0xa5, 0x55, 0x70, 0x96, 0x1d, 0x5a, 0x85, 0x4b, 0x94, 0x78, 0x3d, 0xcb, 0x31, 0xa8, 0xe5, 0x74,
+ 0x15, 0x9c, 0x58, 0xf9, 0x79, 0x06, 0xd5, 0x4e, 0x8b, 0x71, 0x96, 0x0d, 0xda, 0x89, 0xb1, 0xe2,
+ 0x71, 0x9e, 0xe9, 0x6f, 0x16, 0xde, 0xc3, 0x85, 0x69, 0x71, 0x06, 0x73, 0xaf, 0x16, 0x65, 0xee,
+ 0xfa, 0x1f, 0xb4, 0x68, 0x3d, 0x53, 0x6c, 0xfa, 0xa4, 0x0b, 0xab, 0x94, 0x45, 0x84, 0x68, 0xb9,
+ 0xd9, 0x44, 0xfa, 0xd6, 0x50, 0x44, 0x3a, 0xac, 0xc3, 0x27, 0x33, 0xe9, 0x3f, 0x6a, 0x30, 0x7b,
+ 0xaf, 0xdd, 0x6e, 0xae, 0x3a, 0x7c, 0xe3, 0x35, 0x0d, 0xba, 0xcb, 0x0a, 0x72, 0xdf, 0xa0, 0xbb,
+ 0xc9, 0x82, 0xcc, 0x64, 0x98, 0x4b, 0xd0, 0x33, 0x50, 0x65, 0xff, 0x32, 0xc7, 0x79, 0xe4, 0x4f,
+ 0xf2, 0x7c, 0x55, 0x6d, 0xca, 0xb6, 0x07, 0x91, 0xbf, 0xb1, 0xd2, 0x44, 0xdf, 0x82, 0x09, 0x96,
+ 0x26, 0x88, 0xd3, 0x29, 0xc8, 0xa3, 0xa5, 0x53, 0x0d, 0x61, 0x14, 0x52, 0x23, 0xd9, 0x80, 0x03,
+ 0x38, 0x7d, 0x0f, 0xe6, 0x22, 0x83, 0xc0, 0x03, 0x9b, 0xbc, 0xc9, 0x4a, 0x1f, 0x6a, 0x41, 0x85,
+ 0xf5, 0xce, 0x0a, 0x5c, 0xb9, 0xc0, 0x4d, 0x65, 0x62, 0x22, 0x42, 0x1a, 0xc3, 0x7e, 0xf9, 0x58,
+ 0x60, 0xe9, 0x9b, 0x30, 0xb1, 0xda, 0x6c, 0xd8, 0xae, 0xa0, 0x2e, 0xa6, 0xd5, 0xf1, 0x92, 0x33,
+ 0xb5, 0xb2, 0x7a, 0x07, 0x63, 0x2e, 0x41, 0x3a, 0x8c, 0x93, 0xfb, 0x26, 0xe9, 0x53, 0xce, 0x56,
+ 0x26, 0x1b, 0xc0, 0x72, 0xf2, 0x5d, 0xde, 0x82, 0xa5, 0x44, 0xff, 0x49, 0x09, 0x26, 0x64, 0xb7,
+ 0x67, 0x70, 0x94, 0x59, 0x8b, 0x1d, 0x65, 0x9e, 0x2a, 0xb6, 0x04, 0xb9, 0xe7, 0x98, 0x76, 0xe2,
+ 0x1c, 0x73, 0xad, 0x20, 0xde, 0xf1, 0x87, 0x98, 0xf7, 0x4a, 0x30, 0x13, 0x5f, 0x7c, 0xf4, 0x2c,
+ 0x4c, 0xb1, 0xac, 0x6d, 0x99, 0x64, 0x23, 0x24, 0x8b, 0xea, 0x26, 0xa3, 0x15, 0x8a, 0x70, 0x54,
+ 0x0f, 0x75, 0x95, 0x59, 0xd3, 0xf5, 0xa8, 0x1c, 0x74, 0xfe, 0x94, 0x0e, 0xa8, 0x65, 0xd7, 0xc4,
+ 0xbd, 0x7d, 0x6d, 0xd5, 0xa1, 0x9b, 0x5e, 0x8b, 0x7a, 0x96, 0xd3, 0x4d, 0x75, 0xc4, 0xc0, 0x70,
+ 0x14, 0x19, 0xbd, 0xc5, 0x2a, 0x88, 0xef, 0x0e, 0x3c, 0x93, 0x64, 0x31, 0xc1, 0x80, 0xc5, 0xb0,
+ 0x8d, 0xd0, 0x59, 0x73, 0x4d, 0xc3, 0x16, 0x8b, 0x83, 0xc9, 0x0e, 0xf1, 0x88, 0x63, 0x92, 0x80,
+ 0x7d, 0x09, 0x08, 0xac, 0xc0, 0xf4, 0xdf, 0x6a, 0x30, 0x25, 0xe7, 0xe2, 0x0c, 0x38, 0xff, 0xeb,
+ 0x71, 0xce, 0xff, 0x44, 0xc1, 0x1d, 0x9a, 0x4d, 0xf8, 0x7f, 0xa7, 0xc1, 0x62, 0xe0, 0xba, 0x6b,
+ 0x74, 0x1a, 0x86, 0x6d, 0x38, 0x26, 0xf1, 0x82, 0x58, 0x5f, 0x84, 0x92, 0xd5, 0x97, 0x2b, 0x09,
+ 0x12, 0xa0, 0xb4, 0xda, 0xc4, 0x25, 0xab, 0xcf, 0x0a, 0xf2, 0xae, 0xeb, 0x53, 0x7e, 0x30, 0x10,
+ 0x67, 0x4e, 0xe5, 0xf5, 0x3d, 0xd9, 0x8e, 0x95, 0x06, 0xda, 0x82, 0x4a, 0xdf, 0xf5, 0x28, 0x2b,
+ 0x82, 0xe5, 0xc4, 0xfa, 0x1e, 0xe3, 0x35, 0x5b, 0x37, 0x19, 0x88, 0xe1, 0x4e, 0x67, 0x30, 0x58,
+ 0xa0, 0xe9, 0x3f, 0xd4, 0xe0, 0xd1, 0x0c, 0xff, 0x25, 0xff, 0xe8, 0xc0, 0x84, 0x25, 0x84, 0x32,
+ 0xbd, 0x3c, 0x5f, 0xac, 0xdb, 0x8c, 0xa9, 0x08, 0x53, 0x5b, 0x90, 0xc2, 0x02, 0x68, 0xfd, 0x57,
+ 0x1a, 0x5c, 0x4c, 0xf9, 0xcb, 0x53, 0x34, 0x8b, 0x67, 0x49, 0xdc, 0x55, 0x8a, 0x66, 0x61, 0xc9,
+ 0x25, 0xe8, 0x75, 0xa8, 0xf2, 0xe7, 0x26, 0xd3, 0xb5, 0xe5, 0x04, 0xd6, 0x83, 0x09, 0x6c, 0xca,
+ 0xf6, 0x07, 0x87, 0xcb, 0x57, 0x32, 0x8e, 0xed, 0x81, 0x18, 0x2b, 0x00, 0xb4, 0x0c, 0x15, 0xe2,
+ 0x79, 0xae, 0x27, 0x93, 0xfd, 0x24, 0x9b, 0xa9, 0xbb, 0xac, 0x01, 0x8b, 0x76, 0xfd, 0xd7, 0x61,
+ 0x90, 0xb2, 0xec, 0xcb, 0xfc, 0x63, 0x8b, 0x93, 0x4c, 0x8c, 0x6c, 0xe9, 0x30, 0x97, 0xa0, 0x01,
+ 0x5c, 0xb0, 0x12, 0xe9, 0x5a, 0xee, 0xce, 0x7a, 0xb1, 0x69, 0x54, 0x66, 0x8d, 0x05, 0x09, 0x7f,
+ 0x21, 0x29, 0xc1, 0xa9, 0x2e, 0x74, 0x02, 0x29, 0x2d, 0xf4, 0x06, 0x8c, 0xed, 0x52, 0xda, 0xcf,
+ 0x78, 0x37, 0x38, 0xa1, 0x48, 0x84, 0x2e, 0x54, 0xf9, 0xe8, 0xda, 0xed, 0x26, 0xe6, 0x50, 0xfa,
+ 0xef, 0x4b, 0x6a, 0x3e, 0xf8, 0x61, 0xeb, 0x9b, 0x6a, 0xb4, 0x2b, 0xb6, 0xe1, 0xfb, 0x3c, 0x85,
+ 0x89, 0x8b, 0x81, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda, 0xa8, 0x1d, 0x16, 0x4f, 0x6d, 0x94,
+ 0xe2, 0x39, 0x95, 0x55, 0x38, 0xd1, 0x3d, 0x28, 0x53, 0xbb, 0xe8, 0x01, 0x5f, 0x22, 0xb6, 0xd7,
+ 0x5a, 0x8d, 0x29, 0x39, 0xe5, 0xe5, 0xf6, 0x5a, 0x0b, 0x33, 0x08, 0xb4, 0x09, 0x15, 0x6f, 0x60,
+ 0x13, 0x56, 0x07, 0xca, 0xc5, 0xeb, 0x0a, 0x9b, 0xc1, 0x70, 0xf3, 0xb1, 0x5f, 0x3e, 0x16, 0x38,
+ 0xfa, 0x8f, 0x34, 0x98, 0x8e, 0x55, 0x0b, 0xe4, 0xc1, 0x79, 0x3b, 0xb2, 0x77, 0xe4, 0x3c, 0x3c,
+ 0x37, 0xfc, 0xae, 0x93, 0x9b, 0x7e, 0x4e, 0xf6, 0x7b, 0x3e, 0x2a, 0xc3, 0xb1, 0x3e, 0x74, 0x03,
+ 0x20, 0x1c, 0x36, 0xdb, 0x07, 0x2c, 0x78, 0xc5, 0x86, 0x97, 0xfb, 0x80, 0xc5, 0xb4, 0x8f, 0x45,
+ 0x3b, 0xba, 0x09, 0xe0, 0x13, 0xd3, 0x23, 0x74, 0x23, 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x94, 0x04,
+ 0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xbd, 0x41, 0xe8, 0xf7, 0x5d, 0x6f, 0xaf, 0xe9, 0xda, 0x96,
+ 0x79, 0x70, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x49, 0xf9, 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0,
+ 0x3f, 0xd2, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a, 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c,
+ 0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d, 0x19, 0xbd, 0xc3, 0x61, 0x12, 0xe2,
+ 0x85, 0x35, 0xa7, 0xed, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c, 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x21,
+ 0x8d, 0x00, 0xc3, 0xd8, 0x8e, 0xe7, 0xf6, 0x46, 0x1e, 0x83, 0x5a, 0x88, 0x57, 0x3c, 0xb7, 0x87,
+ 0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0, 0x1b, 0xde, 0x88, 0xf3, 0x86, 0x6b,
+ 0xc3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83, 0x0d, 0x18, 0xed, 0xc0, 0x54, 0xdf,
+ 0xed, 0xb4, 0x4e, 0xe1, 0xad, 0x77, 0x96, 0xf1, 0xb9, 0x66, 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x3e,
+ 0x5c, 0x64, 0xd4, 0xc2, 0xef, 0x1b, 0x26, 0x69, 0x9d, 0xc2, 0xed, 0xd7, 0x23, 0xfc, 0x31, 0x29,
+ 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0xfa, 0xfc, 0x7c, 0x21, 0x89, 0xe4, 0x89, 0x24,
+ 0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1, 0xff, 0x25, 0x19, 0x0d, 0x9c, 0xae,
+ 0xbe, 0x1a, 0xa1, 0x07, 0xf2, 0xd9, 0x67, 0x34, 0x6a, 0xb0, 0x21, 0x99, 0xc8, 0xa8, 0xcc, 0xba,
+ 0x9a, 0xe0, 0x2d, 0x5f, 0x81, 0x09, 0xe2, 0x74, 0x38, 0x59, 0x17, 0x77, 0x2a, 0x7c, 0x54, 0x77,
+ 0x45, 0x13, 0x0e, 0x64, 0xfa, 0x8f, 0xcb, 0x89, 0x51, 0xf1, 0x32, 0xfb, 0xee, 0xa9, 0x05, 0x87,
+ 0x22, 0xfc, 0xb9, 0x01, 0xb2, 0x1d, 0xd2, 0x3f, 0x11, 0xf3, 0xdf, 0x18, 0x26, 0xe6, 0xa3, 0xf5,
+ 0x2f, 0x97, 0xfc, 0xa1, 0xef, 0xc0, 0x38, 0x11, 0x5d, 0x88, 0xaa, 0x7a, 0x6b, 0x98, 0x2e, 0xc2,
+ 0xf4, 0x1b, 0x9e, 0xb3, 0x64, 0x9b, 0x44, 0x45, 0x2f, 0xb3, 0xf9, 0x62, 0xba, 0xec, 0x58, 0x22,
+ 0xd8, 0xf3, 0x64, 0xe3, 0x31, 0x31, 0x6c, 0xd5, 0xfc, 0xe0, 0x70, 0x19, 0xc2, 0x9f, 0x38, 0x6a,
+ 0xc1, 0x1f, 0xe2, 0xe4, 0x9d, 0xcd, 0xd9, 0x7c, 0xcc, 0x34, 0xdc, 0x43, 0x5c, 0xe8, 0xda, 0xa9,
+ 0x3d, 0xc4, 0x45, 0x20, 0x8f, 0x3f, 0xc3, 0xfe, 0xa3, 0x04, 0x97, 0x42, 0xe5, 0xc2, 0x0f, 0x71,
+ 0x19, 0x26, 0xff, 0xfb, 0xa0, 0xa9, 0xd8, 0xe3, 0x58, 0x38, 0x75, 0xff, 0x79, 0x8f, 0x63, 0xa1,
+ 0x6f, 0x39, 0xd5, 0xee, 0x37, 0xa5, 0xe8, 0x00, 0x86, 0x7c, 0xa1, 0x39, 0x85, 0x6f, 0x7a, 0xbe,
+ 0x74, 0x8f, 0x3c, 0xfa, 0x07, 0x63, 0x70, 0x21, 0xb9, 0x1b, 0x63, 0x17, 0xf9, 0xda, 0x89, 0x17,
+ 0xf9, 0x4d, 0x98, 0xdb, 0x19, 0xd8, 0xf6, 0x01, 0x1f, 0x43, 0xe4, 0x36, 0x5f, 0x3c, 0x01, 0xfc,
+ 0x9f, 0xb4, 0x9c, 0x7b, 0x25, 0x43, 0x07, 0x67, 0x5a, 0xa6, 0xef, 0xf5, 0xc7, 0xfe, 0xdd, 0x7b,
+ 0xfd, 0xca, 0x08, 0xf7, 0xfa, 0x39, 0x17, 0xf1, 0x13, 0x23, 0x5c, 0xc4, 0x67, 0xbf, 0xb2, 0x94,
+ 0x47, 0x7a, 0x65, 0x19, 0xe5, 0x52, 0x3f, 0x23, 0x1f, 0x9e, 0xf8, 0xad, 0xcb, 0x4b, 0x30, 0x13,
+ 0x7f, 0xb3, 0x12, 0x61, 0x21, 0x9e, 0xcd, 0xe4, 0x0b, 0x51, 0x24, 0x2c, 0x44, 0x3b, 0x56, 0x1a,
+ 0xfa, 0x91, 0x06, 0x97, 0xb3, 0xbf, 0x4d, 0x41, 0x36, 0xcc, 0xf4, 0x8c, 0xfb, 0xd1, 0xef, 0x85,
+ 0xb4, 0x11, 0x89, 0x0f, 0x7f, 0x61, 0x58, 0x8f, 0x61, 0xe1, 0x04, 0x36, 0x7a, 0x1b, 0xaa, 0x3d,
+ 0xe3, 0x7e, 0x6b, 0xe0, 0x75, 0xc9, 0xc8, 0x04, 0x8b, 0xef, 0xc8, 0x75, 0x89, 0x82, 0x15, 0x9e,
+ 0xfe, 0x85, 0x06, 0xf3, 0x39, 0xef, 0x06, 0xff, 0x45, 0xa3, 0x7c, 0xaf, 0x04, 0x95, 0x96, 0x69,
+ 0xd8, 0xe4, 0x0c, 0xb8, 0xc9, 0x6b, 0x31, 0x6e, 0x72, 0xd2, 0x37, 0xae, 0xdc, 0xab, 0x5c, 0x5a,
+ 0x82, 0x13, 0xb4, 0xe4, 0xa9, 0x42, 0x68, 0xc7, 0x33, 0x92, 0xe7, 0x61, 0x52, 0x75, 0x3a, 0x5c,
+ 0xa2, 0xd4, 0x7f, 0x59, 0x82, 0xa9, 0x48, 0x17, 0x43, 0xa6, 0xd9, 0x9d, 0x58, 0x6d, 0x29, 0x17,
+ 0xb8, 0xb4, 0x89, 0xf4, 0x55, 0x0b, 0xaa, 0x89, 0xf8, 0x46, 0x23, 0x7c, 0x95, 0x4f, 0x17, 0x99,
+ 0x97, 0x60, 0x86, 0x1a, 0x5e, 0x97, 0x50, 0x75, 0x02, 0x10, 0xf7, 0x95, 0xea, 0x63, 0xa1, 0x76,
+ 0x4c, 0x8a, 0x13, 0xda, 0x8b, 0x2f, 0xc2, 0x74, 0xac, 0xb3, 0x61, 0x3e, 0xb1, 0x68, 0xac, 0x7c,
+ 0xf2, 0xf9, 0xd2, 0xb9, 0x4f, 0x3f, 0x5f, 0x3a, 0xf7, 0xd9, 0xe7, 0x4b, 0xe7, 0x7e, 0x70, 0xb4,
+ 0xa4, 0x7d, 0x72, 0xb4, 0xa4, 0x7d, 0x7a, 0xb4, 0xa4, 0x7d, 0x76, 0xb4, 0xa4, 0xfd, 0xed, 0x68,
+ 0x49, 0xfb, 0xe9, 0x17, 0x4b, 0xe7, 0xde, 0x7e, 0xec, 0xd8, 0xff, 0x71, 0xf1, 0xaf, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0x6a, 0x79, 0xb9, 0xab, 0x91, 0x31, 0x00, 0x00,
}
func (m *DaemonSet) Marshal() (dAtA []byte, err error) {
@@ -2208,6 +2210,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.TerminatingReplicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+ i--
+ dAtA[i] = 0x48
+ }
if m.CollisionCount != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
i--
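Editor's note: the generated MarshalToSizedBuffer functions fill dAtA from the end toward the start, which is why each new field writes its varint value first, decrements i, and only then stores the key byte (0x48 for deployment field 9 above, 0x38 for replica set field 7 below). A minimal standalone sketch of that backward encoding, consistent with the encodeVarintGenerated calls in these hunks (the helper names here are illustrative, not the generated ones):

package main

import "fmt"

// sov returns the base-128 varint size of v in bytes (1..10).
func sov(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// encodeVarintBackward writes v so that its last byte sits just before
// offset i and returns the new start offset. The buffer fills from the end
// toward the start, so the caller writes the value first and then the key
// byte immediately before it, exactly as in the hunks above.
func encodeVarintBackward(buf []byte, i int, v uint64) int {
	i -= sov(v)
	for j := i; ; j++ {
		if v < 0x80 {
			buf[j] = byte(v)
			break
		}
		buf[j] = byte(v&0x7F | 0x80)
		v >>= 7
	}
	return i
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)

	i = encodeVarintBackward(buf, i, 150) // value first...
	i--
	buf[i] = 0x48 // ...then the key: (9 << 3) | wire type 0

	fmt.Printf("% x\n", buf[i:]) // 48 96 01
}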
@@ -3486,6 +3493,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.TerminatingReplicas != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+ i--
+ dAtA[i] = 0x38
+ }
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -4024,6 +4036,9 @@ func (m *DeploymentStatus) Size() (n int) {
if m.CollisionCount != nil {
n += 1 + sovGenerated(uint64(*m.CollisionCount))
}
+ if m.TerminatingReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+ }
return n
}
@@ -4502,6 +4517,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.TerminatingReplicas != nil {
+ n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+ }
return n
}
@@ -4793,6 +4811,7 @@ func (this *DeploymentStatus) String() string {
`Conditions:` + repeatedStringForConditions + `,`,
`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+ `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
`}`,
}, "")
return s
@@ -5182,6 +5201,7 @@ func (this *ReplicaSetStatus) String() string {
`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
+ `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
`}`,
}, "")
return s
@@ -7567,6 +7587,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
}
}
m.CollisionCount = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminatingReplicas = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -11162,6 +11202,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TerminatingReplicas = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
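Editor's note: on the read side, the new `case 9` and `case 7` branches above decode the field with the standard protobuf base-128 varint loop: each byte contributes its low seven bits at an increasing shift, and a clear high bit (b < 0x80) ends the value. A self-contained sketch of the same loop (hypothetical helper, not part of the generated code):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint from buf, mirroring the generated
// loops above: the low seven bits of each byte are data at an increasing
// shift, and a clear high bit (b < 0x80) terminates the value.
func decodeVarint(buf []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(buf) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := buf[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n, err) // 150 2 <nil>: the bytes from the encode sketch above
}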
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
index 9bbcaa0e2..70fcec0cc 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -320,19 +320,19 @@ message DeploymentStatus {
// +optional
optional int64 observedGeneration = 1;
- // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
optional int32 replicas = 2;
- // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
optional int32 updatedReplicas = 3;
- // Total number of ready pods targeted by this deployment.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
optional int32 readyReplicas = 7;
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
optional int32 availableReplicas = 4;
@@ -342,6 +342,13 @@ message DeploymentStatus {
// +optional
optional int32 unavailableReplicas = 5;
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ optional int32 terminatingReplicas = 9;
+
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
@@ -863,16 +870,16 @@ message ReplicaSetList {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of ReplicaSets.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
repeated ReplicaSet items = 2;
}
// ReplicaSetSpec is the specification of a ReplicaSet.
message ReplicaSetSpec {
- // Replicas is the number of desired replicas.
+ // Replicas is the number of desired pods.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
// +optional
optional int32 replicas = 1;
@@ -891,29 +898,36 @@ message ReplicaSetSpec {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
// +optional
optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
message ReplicaSetStatus {
- // Replicas is the most recently observed number of replicas.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
optional int32 replicas = 1;
- // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
// +optional
optional int32 fullyLabeledReplicas = 2;
- // The number of ready replicas for this replica set.
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
// +optional
optional int32 readyReplicas = 4;
- // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
// +optional
optional int32 availableReplicas = 5;
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ optional int32 terminatingReplicas = 7;
+
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
optional int64 observedGeneration = 3;
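Editor's note: the field numbers declared in this proto file determine the single key bytes seen in the earlier Marshal hunks. A protobuf key is (field_number << 3) | wire_type, and int32 varint fields use wire type 0, which is also what the Unmarshal branches check with `if wireType != 0`. A one-line verification:

package main

import "fmt"

func main() {
	// key = (field_number << 3) | wire_type; varint fields use wire type 0.
	fmt.Printf("%#x %#x\n", 9<<3|0, 7<<3|0) // 0x48 0x38
}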
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
index 09f58692f..b80a7a7e1 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -245,19 +245,19 @@ type DeploymentStatus struct {
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
- // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
// +optional
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
- // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
// +optional
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
- // Total number of ready pods targeted by this deployment.
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
- // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
@@ -267,6 +267,13 @@ type DeploymentStatus struct {
// +optional
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
+
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
@@ -941,16 +948,16 @@ type ReplicaSetList struct {
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ReplicaSets.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpec struct {
- // Replicas is the number of desired replicas.
+ // Replicas is the number of desired pods.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
@@ -969,29 +976,36 @@ type ReplicaSetSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
// +optional
Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatus struct {
- // Replicas is the most recently observed number of replicas.
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
- // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
// +optional
FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
- // The number of ready replicas for this replica set.
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
- // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // +optional
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
+
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
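Editor's note: since TerminatingReplicas is a *int32 tagged omitempty and is only populated when the DeploymentReplicaSetTerminatingReplicas gate is enabled, consumers should treat a nil pointer as "not reported" rather than as zero. A small consumer-side sketch against the types above (illustrative only; it assumes the vendored k8s.io/api module is available):

package main

import (
	"encoding/json"
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	s := extensionsv1beta1.ReplicaSetStatus{Replicas: 3}

	// nil pointer + omitempty: the key is absent, i.e. "not reported".
	b, _ := json.Marshal(s)
	fmt.Println(string(b))

	// An explicit zero is a different statement: no pods are terminating.
	n := int32(0)
	s.TerminatingReplicas = &n
	b, _ = json.Marshal(s)
	fmt.Println(string(b))
}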
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 408022c9d..923fab3aa 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -169,11 +169,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
var map_DeploymentStatus = map[string]string{
"": "DeploymentStatus is the most recently observed status of the Deployment.",
"observedGeneration": "The generation observed by the deployment controller.",
- "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
- "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
- "readyReplicas": "Total number of ready pods targeted by this deployment.",
- "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "replicas": "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
+ "updatedReplicas": "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
+ "readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
+ "availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
"conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
@@ -435,7 +436,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
var map_ReplicaSetList = map[string]string{
"": "ReplicaSetList is a collection of ReplicaSets.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+ "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
}
func (ReplicaSetList) SwaggerDoc() map[string]string {
@@ -444,10 +445,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
var map_ReplicaSetSpec = map[string]string{
"": "ReplicaSetSpec is the specification of a ReplicaSet.",
- "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "replicas": "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
"selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
- "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
}
func (ReplicaSetSpec) SwaggerDoc() map[string]string {
@@ -456,10 +457,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
var map_ReplicaSetStatus = map[string]string{
"": "ReplicaSetStatus represents the current status of a ReplicaSet.",
- "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
- "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
- "readyReplicas": "The number of ready replicas for this replica set.",
- "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+ "replicas": "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
+ "fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
+ "readyReplicas": "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
+ "availableReplicas": "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
+ "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
"observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
"conditions": "Represents the latest available observations of a replica set's current state.",
}
diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
index 6b474ae48..2c7a8524e 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
@@ -341,6 +341,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
+ if in.TerminatingReplicas != nil {
+ in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+ *out = new(int32)
+ **out = **in
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
@@ -1045,6 +1050,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
*out = *in
+ if in.TerminatingReplicas != nil {
+ in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+ *out = new(int32)
+ **out = **in
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicaSetCondition, len(*in))
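Editor's note: the new DeepCopyInto blocks allocate a fresh int32 (`*out = new(int32); **out = **in`) instead of copying the pointer, so the copy cannot observe later mutations of the original. A standalone illustration of the difference (not generated code):

package main

import "fmt"

type status struct{ terminating *int32 }

// shallowCopy copies the struct, and with it the pointer itself, so both
// values alias the same int32.
func shallowCopy(in status) status { return in }

// deepCopy mirrors the generated DeepCopyInto: allocate, then copy the value.
func deepCopy(in status) status {
	var out status
	if in.terminating != nil {
		out.terminating = new(int32)
		*out.terminating = *in.terminating
	}
	return out
}

func main() {
	n := int32(3)
	orig := status{terminating: &n}
	s, d := shallowCopy(orig), deepCopy(orig)

	*orig.terminating = 7
	fmt.Println(*s.terminating, *d.terminating) // 7 3: only the deep copy is independent
}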
diff --git a/vendor/k8s.io/api/flowcontrol/v1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go
index c9e7db158..ad5f45791 100644
--- a/vendor/k8s.io/api/flowcontrol/v1/doc.go
+++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
// +groupName=flowcontrol.apiserver.k8s.io
// Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io".
-package v1 // import "k8s.io/api/flowcontrol/v1"
+package v1
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
index 50897b7eb..20268c1f2 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
// +groupName=flowcontrol.apiserver.k8s.io
// Package v1beta1 holds api types of version v1beta1 for group "flowcontrol.apiserver.k8s.io".
-package v1beta1 // import "k8s.io/api/flowcontrol/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
index 53b460d37..2dcad11ad 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
// +groupName=flowcontrol.apiserver.k8s.io
// Package v1beta2 holds api types of version v1beta2 for group "flowcontrol.apiserver.k8s.io".
-package v1beta2 // import "k8s.io/api/flowcontrol/v1beta2"
+package v1beta2
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
index cd60cfef7..95f4430d3 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
// +groupName=flowcontrol.apiserver.k8s.io
// Package v1beta3 holds api types of version v1beta3 for group "flowcontrol.apiserver.k8s.io".
-package v1beta3 // import "k8s.io/api/flowcontrol/v1beta3"
+package v1beta3
diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go
index 1d13e7bab..e2093b7df 100644
--- a/vendor/k8s.io/api/networking/v1/doc.go
+++ b/vendor/k8s.io/api/networking/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=networking.k8s.io
-package v1 // import "k8s.io/api/networking/v1"
+package v1
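Editor's note: the doc.go hunks in this stretch all make the same mechanical change: the trailing import comment on the package clause is removed. Import comments let GOPATH-era tooling enforce a canonical import path; under Go modules the path is fixed by go.mod plus the package directory, so the comment is redundant and current gofmt drops it. Shape of the change, for reference (the module path shown is the real one for these packages):

// GOPATH era: the clause itself pinned the canonical import path.
//
//	package v1 // import "k8s.io/api/flowcontrol/v1"
//
// Module era: the path follows from `module k8s.io/api` in go.mod plus the
// flowcontrol/v1 directory, so the bare clause is enough.
package v1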
diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go
index 7c023e690..062382b63 100644
--- a/vendor/k8s.io/api/networking/v1/generated.pb.go
+++ b/vendor/k8s.io/api/networking/v1/generated.pb.go
@@ -104,10 +104,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() {
var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo
+func (m *IPAddress) Reset() { *m = IPAddress{} }
+func (*IPAddress) ProtoMessage() {}
+func (*IPAddress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{2}
+}
+func (m *IPAddress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IPAddress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IPAddress.Merge(m, src)
+}
+func (m *IPAddress) XXX_Size() int {
+ return m.Size()
+}
+func (m *IPAddress) XXX_DiscardUnknown() {
+ xxx_messageInfo_IPAddress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IPAddress proto.InternalMessageInfo
+
+func (m *IPAddressList) Reset() { *m = IPAddressList{} }
+func (*IPAddressList) ProtoMessage() {}
+func (*IPAddressList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{3}
+}
+func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IPAddressList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IPAddressList.Merge(m, src)
+}
+func (m *IPAddressList) XXX_Size() int {
+ return m.Size()
+}
+func (m *IPAddressList) XXX_DiscardUnknown() {
+ xxx_messageInfo_IPAddressList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
+
+func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} }
+func (*IPAddressSpec) ProtoMessage() {}
+func (*IPAddressSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{4}
+}
+func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IPAddressSpec.Merge(m, src)
+}
+func (m *IPAddressSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *IPAddressSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
+
func (m *IPBlock) Reset() { *m = IPBlock{} }
func (*IPBlock) ProtoMessage() {}
func (*IPBlock) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{2}
+ return fileDescriptor_2c41434372fec1d7, []int{5}
}
func (m *IPBlock) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
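Editor's note: Descriptor() addresses each message by its index in the generated file's message list, so registering the three new IPAddress types at indices 2 through 4 pushes every later message up by three; the hunks below are purely that renumbering (IPBlock 2 to 5, Ingress 3 to 6, and so on). A quick sketch of the shift, where the first two names are the messages the context above shows at indices 0 and 1:

package main

import "fmt"

func main() {
	before := []string{"HTTPIngressPath", "HTTPIngressRuleValue", "IPBlock", "Ingress", "IngressBackend"}
	inserted := []string{"IPAddress", "IPAddressList", "IPAddressSpec"}

	// Splice the new messages in at index 2; everything after shifts by three.
	after := []string{}
	after = append(after, before[:2]...)
	after = append(after, inserted...)
	after = append(after, before[2:]...)

	for i, name := range after {
		fmt.Println(i, name) // IPBlock lands at 5, Ingress at 6, IngressBackend at 7
	}
}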
@@ -135,7 +219,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo
func (m *Ingress) Reset() { *m = Ingress{} }
func (*Ingress) ProtoMessage() {}
func (*Ingress) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{3}
+ return fileDescriptor_2c41434372fec1d7, []int{6}
}
func (m *Ingress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -163,7 +247,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo
func (m *IngressBackend) Reset() { *m = IngressBackend{} }
func (*IngressBackend) ProtoMessage() {}
func (*IngressBackend) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{4}
+ return fileDescriptor_2c41434372fec1d7, []int{7}
}
func (m *IngressBackend) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -191,7 +275,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
func (m *IngressClass) Reset() { *m = IngressClass{} }
func (*IngressClass) ProtoMessage() {}
func (*IngressClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{5}
+ return fileDescriptor_2c41434372fec1d7, []int{8}
}
func (m *IngressClass) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -219,7 +303,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo
func (m *IngressClassList) Reset() { *m = IngressClassList{} }
func (*IngressClassList) ProtoMessage() {}
func (*IngressClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{6}
+ return fileDescriptor_2c41434372fec1d7, []int{9}
}
func (m *IngressClassList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -247,7 +331,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo
func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} }
func (*IngressClassParametersReference) ProtoMessage() {}
func (*IngressClassParametersReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{7}
+ return fileDescriptor_2c41434372fec1d7, []int{10}
}
func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -275,7 +359,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo
func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} }
func (*IngressClassSpec) ProtoMessage() {}
func (*IngressClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{8}
+ return fileDescriptor_2c41434372fec1d7, []int{11}
}
func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -303,7 +387,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo
func (m *IngressList) Reset() { *m = IngressList{} }
func (*IngressList) ProtoMessage() {}
func (*IngressList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{9}
+ return fileDescriptor_2c41434372fec1d7, []int{12}
}
func (m *IngressList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -331,7 +415,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo
func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
func (*IngressLoadBalancerIngress) ProtoMessage() {}
func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{10}
+ return fileDescriptor_2c41434372fec1d7, []int{13}
}
func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -359,7 +443,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo
func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
func (*IngressLoadBalancerStatus) ProtoMessage() {}
func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{11}
+ return fileDescriptor_2c41434372fec1d7, []int{14}
}
func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -387,7 +471,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo
func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
func (*IngressPortStatus) ProtoMessage() {}
func (*IngressPortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{12}
+ return fileDescriptor_2c41434372fec1d7, []int{15}
}
func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -415,7 +499,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo
func (m *IngressRule) Reset() { *m = IngressRule{} }
func (*IngressRule) ProtoMessage() {}
func (*IngressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{13}
+ return fileDescriptor_2c41434372fec1d7, []int{16}
}
func (m *IngressRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -443,7 +527,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo
func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
func (*IngressRuleValue) ProtoMessage() {}
func (*IngressRuleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{14}
+ return fileDescriptor_2c41434372fec1d7, []int{17}
}
func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -471,7 +555,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} }
func (*IngressServiceBackend) ProtoMessage() {}
func (*IngressServiceBackend) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{15}
+ return fileDescriptor_2c41434372fec1d7, []int{18}
}
func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -499,7 +583,7 @@ var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo
func (m *IngressSpec) Reset() { *m = IngressSpec{} }
func (*IngressSpec) ProtoMessage() {}
func (*IngressSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{16}
+ return fileDescriptor_2c41434372fec1d7, []int{19}
}
func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -527,7 +611,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
func (m *IngressStatus) Reset() { *m = IngressStatus{} }
func (*IngressStatus) ProtoMessage() {}
func (*IngressStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{17}
+ return fileDescriptor_2c41434372fec1d7, []int{20}
}
func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -555,7 +639,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
func (m *IngressTLS) Reset() { *m = IngressTLS{} }
func (*IngressTLS) ProtoMessage() {}
func (*IngressTLS) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{18}
+ return fileDescriptor_2c41434372fec1d7, []int{21}
}
func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -583,7 +667,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo
func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
func (*NetworkPolicy) ProtoMessage() {}
func (*NetworkPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{19}
+ return fileDescriptor_2c41434372fec1d7, []int{22}
}
func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -611,7 +695,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo
func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} }
func (*NetworkPolicyEgressRule) ProtoMessage() {}
func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{20}
+ return fileDescriptor_2c41434372fec1d7, []int{23}
}
func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -639,7 +723,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo
func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
func (*NetworkPolicyIngressRule) ProtoMessage() {}
func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{21}
+ return fileDescriptor_2c41434372fec1d7, []int{24}
}
func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -667,7 +751,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo
func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
func (*NetworkPolicyList) ProtoMessage() {}
func (*NetworkPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{22}
+ return fileDescriptor_2c41434372fec1d7, []int{25}
}
func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -695,7 +779,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo
func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
func (*NetworkPolicyPeer) ProtoMessage() {}
func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{23}
+ return fileDescriptor_2c41434372fec1d7, []int{26}
}
func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -723,7 +807,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo
func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
func (*NetworkPolicyPort) ProtoMessage() {}
func (*NetworkPolicyPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{24}
+ return fileDescriptor_2c41434372fec1d7, []int{27}
}
func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -751,7 +835,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo
func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
func (*NetworkPolicySpec) ProtoMessage() {}
func (*NetworkPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{25}
+ return fileDescriptor_2c41434372fec1d7, []int{28}
}
func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -776,10 +860,38 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
+func (m *ParentReference) Reset() { *m = ParentReference{} }
+func (*ParentReference) ProtoMessage() {}
+func (*ParentReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{29}
+}
+func (m *ParentReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ParentReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParentReference.Merge(m, src)
+}
+func (m *ParentReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *ParentReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParentReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParentReference proto.InternalMessageInfo
+
func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} }
func (*ServiceBackendPort) ProtoMessage() {}
func (*ServiceBackendPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{26}
+ return fileDescriptor_2c41434372fec1d7, []int{30}
}
func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -804,9 +916,124 @@ func (m *ServiceBackendPort) XXX_DiscardUnknown() {
var xxx_messageInfo_ServiceBackendPort proto.InternalMessageInfo
+func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} }
+func (*ServiceCIDR) ProtoMessage() {}
+func (*ServiceCIDR) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{31}
+}
+func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceCIDR.Merge(m, src)
+}
+func (m *ServiceCIDR) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceCIDR) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
+
+func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} }
+func (*ServiceCIDRList) ProtoMessage() {}
+func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{32}
+}
+func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceCIDRList.Merge(m, src)
+}
+func (m *ServiceCIDRList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceCIDRList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
+
+func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} }
+func (*ServiceCIDRSpec) ProtoMessage() {}
+func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{33}
+}
+func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
+}
+func (m *ServiceCIDRSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
+
+func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} }
+func (*ServiceCIDRStatus) ProtoMessage() {}
+func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c41434372fec1d7, []int{34}
+}
+func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
+}
+func (m *ServiceCIDRStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
+
func init() {
proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1.HTTPIngressPath")
proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1.HTTPIngressRuleValue")
+ proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1.IPAddress")
+ proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1.IPAddressList")
+ proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1.IPAddressSpec")
proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock")
proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1.Ingress")
proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1.IngressBackend")
@@ -831,7 +1058,12 @@ func init() {
proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer")
proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort")
proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec")
+ proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1.ParentReference")
proto.RegisterType((*ServiceBackendPort)(nil), "k8s.io.api.networking.v1.ServiceBackendPort")
+ proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1.ServiceCIDR")
+ proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1.ServiceCIDRList")
+ proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1.ServiceCIDRSpec")
+ proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1.ServiceCIDRStatus")
}
func init() {
@@ -839,111 +1071,125 @@ func init() {
}
var fileDescriptor_2c41434372fec1d7 = []byte{
- // 1652 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x55,
- 0x14, 0xce, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x69, 0x85, 0x09, 0xc2, 0x0e, 0x23, 0xda,
- 0x06, 0xda, 0xda, 0x34, 0xad, 0x10, 0x6c, 0x78, 0x4c, 0x9a, 0xa6, 0xa1, 0xa9, 0x63, 0x5d, 0x5b,
- 0x45, 0x20, 0x1e, 0x9d, 0x8c, 0x6f, 0x9c, 0x69, 0xc6, 0x33, 0xa3, 0x3b, 0xd7, 0xa5, 0x95, 0x10,
- 0x62, 0xc3, 0x82, 0x1d, 0x7f, 0x01, 0xf1, 0x0b, 0x10, 0x2c, 0x90, 0x10, 0x14, 0x36, 0xa8, 0xcb,
- 0x4a, 0x6c, 0xba, 0xc1, 0xa2, 0xe6, 0x5f, 0x64, 0x85, 0xee, 0x63, 0x1e, 0x7e, 0xd5, 0xa6, 0xaa,
- 0xb2, 0x4a, 0xee, 0x39, 0xe7, 0x7e, 0xe7, 0x71, 0xcf, 0x6b, 0x0c, 0x6b, 0x87, 0x6f, 0x06, 0x25,
- 0xdb, 0x2b, 0x9b, 0xbe, 0x5d, 0x76, 0x09, 0xfb, 0xdc, 0xa3, 0x87, 0xb6, 0xdb, 0x2c, 0xdf, 0xb9,
- 0x58, 0x6e, 0x12, 0x97, 0x50, 0x93, 0x91, 0x46, 0xc9, 0xa7, 0x1e, 0xf3, 0x50, 0x5e, 0x4a, 0x96,
- 0x4c, 0xdf, 0x2e, 0xc5, 0x92, 0xa5, 0x3b, 0x17, 0x57, 0x2e, 0x34, 0x6d, 0x76, 0xd0, 0xde, 0x2b,
- 0x59, 0x5e, 0xab, 0xdc, 0xf4, 0x9a, 0x5e, 0x59, 0x5c, 0xd8, 0x6b, 0xef, 0x8b, 0x93, 0x38, 0x88,
- 0xff, 0x24, 0xd0, 0x8a, 0x9e, 0x50, 0x69, 0x79, 0x94, 0x0c, 0x51, 0xb6, 0x72, 0x39, 0x96, 0x69,
- 0x99, 0xd6, 0x81, 0xed, 0x12, 0x7a, 0xaf, 0xec, 0x1f, 0x36, 0x39, 0x21, 0x28, 0xb7, 0x08, 0x33,
- 0x87, 0xdd, 0x2a, 0x8f, 0xba, 0x45, 0xdb, 0x2e, 0xb3, 0x5b, 0x64, 0xe0, 0xc2, 0x1b, 0xe3, 0x2e,
- 0x04, 0xd6, 0x01, 0x69, 0x99, 0x03, 0xf7, 0x2e, 0x8d, 0xba, 0xd7, 0x66, 0xb6, 0x53, 0xb6, 0x5d,
- 0x16, 0x30, 0xda, 0x7f, 0x49, 0xff, 0x4d, 0x83, 0x13, 0xd7, 0xea, 0xf5, 0xea, 0xb6, 0xdb, 0xa4,
- 0x24, 0x08, 0xaa, 0x26, 0x3b, 0x40, 0xab, 0x30, 0xe3, 0x9b, 0xec, 0x20, 0xaf, 0xad, 0x6a, 0x6b,
- 0x73, 0xc6, 0xfc, 0x83, 0x4e, 0x71, 0xaa, 0xdb, 0x29, 0xce, 0x70, 0x1e, 0x16, 0x1c, 0x74, 0x19,
- 0xb2, 0xfc, 0x6f, 0xfd, 0x9e, 0x4f, 0xf2, 0xd3, 0x42, 0x2a, 0xdf, 0xed, 0x14, 0xb3, 0x55, 0x45,
- 0x3b, 0x4a, 0xfc, 0x8f, 0x23, 0x49, 0x54, 0x83, 0xcc, 0x9e, 0x69, 0x1d, 0x12, 0xb7, 0x91, 0x4f,
- 0xad, 0x6a, 0x6b, 0xb9, 0xf5, 0xb5, 0xd2, 0xa8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, 0x9c,
- 0x50, 0x46, 0x64, 0x14, 0x01, 0x87, 0x48, 0xfa, 0x3e, 0x9c, 0x4c, 0xd8, 0x8f, 0xdb, 0x0e, 0xb9,
- 0x69, 0x3a, 0x6d, 0x82, 0x2a, 0x90, 0xe6, 0x8a, 0x83, 0xbc, 0xb6, 0x3a, 0xbd, 0x96, 0x5b, 0x7f,
- 0x75, 0xb4, 0xaa, 0x3e, 0xf7, 0x8d, 0x05, 0xa5, 0x2b, 0xcd, 0x4f, 0x01, 0x96, 0x30, 0xfa, 0x2e,
- 0x64, 0xb6, 0xab, 0x86, 0xe3, 0x59, 0x87, 0x3c, 0x3e, 0x96, 0xdd, 0xa0, 0xfd, 0xf1, 0xd9, 0xd8,
- 0xbe, 0x82, 0xb1, 0xe0, 0x20, 0x1d, 0x66, 0xc9, 0x5d, 0x8b, 0xf8, 0x2c, 0x9f, 0x5a, 0x9d, 0x5e,
- 0x9b, 0x33, 0xa0, 0xdb, 0x29, 0xce, 0x6e, 0x0a, 0x0a, 0x56, 0x1c, 0xfd, 0xeb, 0x14, 0x64, 0x94,
- 0x5a, 0x74, 0x0b, 0xb2, 0x3c, 0x7d, 0x1a, 0x26, 0x33, 0x05, 0x6a, 0x6e, 0xfd, 0xf5, 0x84, 0xbd,
- 0xd1, 0x6b, 0x96, 0xfc, 0xc3, 0x26, 0x27, 0x04, 0x25, 0x2e, 0xcd, 0x6d, 0xdf, 0xdd, 0xbb, 0x4d,
- 0x2c, 0x76, 0x83, 0x30, 0xd3, 0x40, 0xca, 0x0e, 0x88, 0x69, 0x38, 0x42, 0x45, 0x5b, 0x30, 0x13,
- 0xf8, 0xc4, 0x52, 0x81, 0x3f, 0x3d, 0x36, 0xf0, 0x35, 0x9f, 0x58, 0xb1, 0x6b, 0xfc, 0x84, 0x05,
- 0x00, 0xda, 0x85, 0xd9, 0x80, 0x99, 0xac, 0x1d, 0x88, 0x87, 0xcf, 0xad, 0x9f, 0x1d, 0x0f, 0x25,
- 0xc4, 0x8d, 0x45, 0x05, 0x36, 0x2b, 0xcf, 0x58, 0xc1, 0xe8, 0x7f, 0x68, 0xb0, 0xd8, 0xfb, 0xda,
- 0xe8, 0x26, 0x64, 0x02, 0x42, 0xef, 0xd8, 0x16, 0xc9, 0xcf, 0x08, 0x25, 0xe5, 0xf1, 0x4a, 0xa4,
- 0x7c, 0x98, 0x2f, 0x39, 0x9e, 0x2b, 0x8a, 0x86, 0x43, 0x30, 0xf4, 0x01, 0x64, 0x29, 0x09, 0xbc,
- 0x36, 0xb5, 0x88, 0xb2, 0xfe, 0x42, 0x12, 0x98, 0xd7, 0x3d, 0x87, 0xe4, 0xc9, 0xda, 0xd8, 0xf1,
- 0x2c, 0xd3, 0x91, 0xa1, 0xc4, 0x64, 0x9f, 0x50, 0xe2, 0x5a, 0xc4, 0x98, 0xe7, 0x59, 0x8e, 0x15,
- 0x04, 0x8e, 0xc0, 0x78, 0x15, 0xcd, 0x2b, 0x43, 0x36, 0x1c, 0xf3, 0x58, 0x1e, 0x74, 0xa7, 0xe7,
- 0x41, 0x5f, 0x1b, 0x1b, 0x20, 0x61, 0xd7, 0xa8, 0x57, 0xd5, 0x7f, 0xd5, 0x60, 0x29, 0x29, 0xb8,
- 0x63, 0x07, 0x0c, 0x7d, 0x3c, 0xe0, 0x44, 0x69, 0x32, 0x27, 0xf8, 0x6d, 0xe1, 0xc2, 0x92, 0x52,
- 0x95, 0x0d, 0x29, 0x09, 0x07, 0xae, 0x43, 0xda, 0x66, 0xa4, 0x15, 0x88, 0x12, 0xc9, 0xad, 0x9f,
- 0x99, 0xcc, 0x83, 0xb8, 0x3a, 0xb7, 0xf9, 0x65, 0x2c, 0x31, 0xf4, 0xbf, 0x35, 0x28, 0x26, 0xc5,
- 0xaa, 0x26, 0x35, 0x5b, 0x84, 0x11, 0x1a, 0x44, 0x8f, 0x87, 0xd6, 0x20, 0x6b, 0x56, 0xb7, 0xb7,
- 0xa8, 0xd7, 0xf6, 0xc3, 0xd2, 0xe5, 0xa6, 0xbd, 0xa7, 0x68, 0x38, 0xe2, 0xf2, 0x02, 0x3f, 0xb4,
- 0x55, 0x97, 0x4a, 0x14, 0xf8, 0x75, 0xdb, 0x6d, 0x60, 0xc1, 0xe1, 0x12, 0xae, 0xd9, 0x0a, 0x9b,
- 0x5f, 0x24, 0x51, 0x31, 0x5b, 0x04, 0x0b, 0x0e, 0x2a, 0x42, 0x3a, 0xb0, 0x3c, 0x5f, 0x66, 0xf0,
- 0x9c, 0x31, 0xc7, 0x4d, 0xae, 0x71, 0x02, 0x96, 0x74, 0x74, 0x0e, 0xe6, 0xb8, 0x60, 0xe0, 0x9b,
- 0x16, 0xc9, 0xa7, 0x85, 0xd0, 0x42, 0xb7, 0x53, 0x9c, 0xab, 0x84, 0x44, 0x1c, 0xf3, 0xf5, 0x1f,
- 0xfa, 0xde, 0x87, 0x3f, 0x1d, 0x5a, 0x07, 0xb0, 0x3c, 0x97, 0x51, 0xcf, 0x71, 0x48, 0xd8, 0x8d,
- 0xa2, 0xa4, 0xd9, 0x88, 0x38, 0x38, 0x21, 0x85, 0x6c, 0x00, 0x3f, 0x8a, 0x8d, 0x4a, 0x9e, 0xb7,
- 0x26, 0x0b, 0xfd, 0x90, 0x98, 0x1a, 0x8b, 0x5c, 0x55, 0x82, 0x91, 0x00, 0xd7, 0x7f, 0xd4, 0x20,
- 0xa7, 0xee, 0x1f, 0x43, 0x3a, 0x5d, 0xed, 0x4d, 0xa7, 0x97, 0xc7, 0x8f, 0x96, 0xe1, 0x99, 0xf4,
- 0xb3, 0x06, 0x2b, 0xa1, 0xd5, 0x9e, 0xd9, 0x30, 0x4c, 0xc7, 0x74, 0x2d, 0x42, 0xc3, 0x4e, 0xbd,
- 0x02, 0x29, 0x3b, 0x4c, 0x1f, 0x50, 0x00, 0xa9, 0xed, 0x2a, 0x4e, 0xd9, 0x3e, 0x3a, 0x0f, 0xd9,
- 0x03, 0x2f, 0x60, 0x22, 0x31, 0x64, 0xea, 0x44, 0x06, 0x5f, 0x53, 0x74, 0x1c, 0x49, 0xa0, 0x2a,
- 0xa4, 0x7d, 0x8f, 0xb2, 0x20, 0x3f, 0x23, 0x0c, 0x3e, 0x37, 0xd6, 0xe0, 0xaa, 0x47, 0x99, 0xea,
- 0xa5, 0xf1, 0x88, 0xe2, 0x08, 0x58, 0x02, 0xe9, 0x5f, 0xc0, 0x0b, 0x43, 0x2c, 0x97, 0x57, 0xd0,
- 0x67, 0x90, 0xb1, 0x25, 0x53, 0x4d, 0xc4, 0xcb, 0x63, 0x15, 0x0e, 0xf1, 0x3f, 0x1e, 0xc4, 0xe1,
- 0xc0, 0x0d, 0x51, 0xf5, 0xef, 0x35, 0x58, 0x1e, 0xb0, 0x54, 0xec, 0x12, 0x1e, 0x65, 0x22, 0x62,
- 0xe9, 0xc4, 0x2e, 0xe1, 0x51, 0x86, 0x05, 0x07, 0x5d, 0x87, 0xac, 0x58, 0x45, 0x2c, 0xcf, 0x51,
- 0x51, 0x2b, 0x87, 0x51, 0xab, 0x2a, 0xfa, 0x51, 0xa7, 0xf8, 0xe2, 0xe0, 0x7e, 0x56, 0x0a, 0xd9,
- 0x38, 0x02, 0xe0, 0x55, 0x47, 0x28, 0xf5, 0xa8, 0x2a, 0x4c, 0x51, 0x75, 0x9b, 0x9c, 0x80, 0x25,
- 0x5d, 0xff, 0x2e, 0x4e, 0x4a, 0xbe, 0x2b, 0x70, 0xfb, 0xf8, 0x8b, 0xf4, 0xcf, 0x72, 0xfe, 0x5e,
- 0x58, 0x70, 0x90, 0x0f, 0x4b, 0x76, 0xdf, 0x72, 0x31, 0x71, 0xd3, 0x8d, 0x6e, 0x18, 0x79, 0x85,
- 0xbc, 0xd4, 0xcf, 0xc1, 0x03, 0xe8, 0xfa, 0x2d, 0x18, 0x90, 0xe2, 0xed, 0xfe, 0x80, 0x31, 0x7f,
- 0x48, 0xe1, 0x8c, 0xde, 0x66, 0x62, 0xed, 0x59, 0xe1, 0x53, 0xbd, 0x5e, 0xc5, 0x02, 0x45, 0xff,
- 0x46, 0x83, 0x53, 0x43, 0x07, 0x67, 0xd4, 0xd8, 0xb4, 0x91, 0x8d, 0xad, 0xa2, 0x5e, 0x54, 0xc6,
- 0xe0, 0xfc, 0x68, 0x4b, 0x7a, 0x91, 0xf9, 0x8b, 0x0f, 0x7b, 0x7f, 0xfd, 0xcf, 0x54, 0xf4, 0x22,
- 0xa2, 0xab, 0xbd, 0x1b, 0xc5, 0x5b, 0x74, 0x1d, 0xae, 0x59, 0xf5, 0xd0, 0x93, 0x89, 0xf8, 0x45,
- 0x3c, 0x3c, 0x20, 0x8d, 0x1a, 0xb0, 0xd8, 0x20, 0xfb, 0x66, 0xdb, 0x61, 0x4a, 0xb7, 0x8a, 0xda,
- 0xe4, 0xeb, 0x26, 0xea, 0x76, 0x8a, 0x8b, 0x57, 0x7a, 0x30, 0x70, 0x1f, 0x26, 0xda, 0x80, 0x69,
- 0xe6, 0x84, 0xed, 0xe6, 0x95, 0xb1, 0xd0, 0xf5, 0x9d, 0x9a, 0x91, 0x53, 0xee, 0x4f, 0xd7, 0x77,
- 0x6a, 0x98, 0xdf, 0x46, 0xef, 0x43, 0x9a, 0xb6, 0x1d, 0xc2, 0x97, 0xa9, 0xe9, 0x89, 0xf6, 0x32,
- 0xfe, 0xa6, 0x71, 0xf9, 0xf3, 0x53, 0x80, 0x25, 0x84, 0xfe, 0x25, 0x2c, 0xf4, 0x6c, 0x5c, 0xa8,
- 0x05, 0xf3, 0x4e, 0xa2, 0x84, 0x55, 0x14, 0x2e, 0xfd, 0xaf, 0xba, 0x57, 0x0d, 0xe7, 0xa4, 0xd2,
- 0x38, 0x9f, 0xe4, 0xe1, 0x1e, 0x78, 0xdd, 0x04, 0x88, 0x7d, 0xe5, 0x95, 0xc8, 0xcb, 0x47, 0x76,
- 0x1b, 0x55, 0x89, 0xbc, 0xaa, 0x02, 0x2c, 0xe9, 0x7c, 0x7a, 0x05, 0xc4, 0xa2, 0x84, 0x55, 0xe2,
- 0x7e, 0x19, 0x4d, 0xaf, 0x5a, 0xc4, 0xc1, 0x09, 0x29, 0xfd, 0x77, 0x0d, 0x16, 0x2a, 0xd2, 0xe4,
- 0xaa, 0xe7, 0xd8, 0xd6, 0xbd, 0x63, 0x58, 0xb4, 0x6e, 0xf4, 0x2c, 0x5a, 0x4f, 0x68, 0xd3, 0x3d,
- 0x86, 0x8d, 0xdc, 0xb4, 0x7e, 0xd2, 0xe0, 0xf9, 0x1e, 0xc9, 0xcd, 0xb8, 0x19, 0x45, 0x23, 0x41,
- 0x1b, 0x37, 0x12, 0x7a, 0x10, 0x44, 0x69, 0x0d, 0x1d, 0x09, 0x68, 0x0b, 0x52, 0xcc, 0x53, 0x39,
- 0x3a, 0x31, 0x1c, 0x21, 0x34, 0x9e, 0x6d, 0x75, 0x0f, 0xa7, 0x98, 0xa7, 0xff, 0xa2, 0x41, 0xbe,
- 0x47, 0x2a, 0xd9, 0x44, 0x9f, 0xbd, 0xdd, 0x37, 0x60, 0x66, 0x9f, 0x7a, 0xad, 0xa7, 0xb1, 0x3c,
- 0x0a, 0xfa, 0x55, 0xea, 0xb5, 0xb0, 0x80, 0xd1, 0xef, 0x6b, 0xb0, 0xdc, 0x23, 0x79, 0x0c, 0x0b,
- 0xc9, 0x4e, 0xef, 0x42, 0x72, 0x76, 0x42, 0x1f, 0x46, 0xac, 0x25, 0xf7, 0x53, 0x7d, 0x1e, 0x70,
- 0x5f, 0xd1, 0x3e, 0xe4, 0x7c, 0xaf, 0x51, 0x23, 0x0e, 0xb1, 0x98, 0x37, 0xac, 0xc0, 0x9f, 0xe4,
- 0x84, 0xb9, 0x47, 0x9c, 0xf0, 0xaa, 0x71, 0xa2, 0xdb, 0x29, 0xe6, 0xaa, 0x31, 0x16, 0x4e, 0x02,
- 0xa3, 0xbb, 0xb0, 0x1c, 0xed, 0xa2, 0x91, 0xb6, 0xd4, 0xd3, 0x6b, 0x3b, 0xd5, 0xed, 0x14, 0x97,
- 0x2b, 0xfd, 0x88, 0x78, 0x50, 0x09, 0xba, 0x06, 0x19, 0xdb, 0x17, 0x9f, 0xdd, 0xea, 0x8b, 0xed,
- 0x49, 0x8b, 0x9d, 0xfc, 0x3e, 0x97, 0x1f, 0x7f, 0xea, 0x80, 0xc3, 0xeb, 0xfa, 0x5f, 0xfd, 0x39,
- 0xc0, 0x13, 0x0e, 0x6d, 0x25, 0xb6, 0x0f, 0x39, 0xf3, 0xce, 0x3d, 0xdd, 0xe6, 0xd1, 0x3b, 0x16,
- 0x47, 0x37, 0xa1, 0x36, 0xb3, 0x9d, 0x92, 0xfc, 0x31, 0xa6, 0xb4, 0xed, 0xb2, 0x5d, 0x5a, 0x63,
- 0xd4, 0x76, 0x9b, 0x72, 0x44, 0x27, 0xd6, 0xa2, 0xd3, 0x90, 0x51, 0x53, 0x53, 0x38, 0x9e, 0x96,
- 0x5e, 0x6d, 0x4a, 0x12, 0x0e, 0x79, 0xfa, 0x51, 0x7f, 0x5e, 0x88, 0x19, 0x7a, 0xfb, 0x99, 0xe5,
- 0xc5, 0x73, 0x2a, 0x1b, 0x47, 0xe7, 0xc6, 0x27, 0xf1, 0x62, 0x29, 0x33, 0x7d, 0x7d, 0xc2, 0x4c,
- 0x4f, 0x4e, 0xb4, 0x91, 0x6b, 0x25, 0xfa, 0x10, 0x66, 0x89, 0x44, 0x97, 0x23, 0xf2, 0xe2, 0x84,
- 0xe8, 0x71, 0x5b, 0x8d, 0x7f, 0x79, 0x50, 0x34, 0x05, 0x88, 0xde, 0xe1, 0x51, 0xe2, 0xb2, 0xfc,
- 0x83, 0x5f, 0xee, 0xe1, 0x73, 0xc6, 0x4b, 0xd2, 0xd9, 0x88, 0x7c, 0xc4, 0x3f, 0x70, 0xa2, 0x23,
- 0x4e, 0xde, 0xd0, 0x3f, 0x05, 0x34, 0xb8, 0xe4, 0x4c, 0xb0, 0x42, 0x9d, 0x81, 0x59, 0xb7, 0xdd,
- 0xda, 0x23, 0xb2, 0x86, 0xd2, 0xb1, 0x81, 0x15, 0x41, 0xc5, 0x8a, 0x6b, 0xbc, 0xfd, 0xe0, 0x71,
- 0x61, 0xea, 0xe1, 0xe3, 0xc2, 0xd4, 0xa3, 0xc7, 0x85, 0xa9, 0xaf, 0xba, 0x05, 0xed, 0x41, 0xb7,
- 0xa0, 0x3d, 0xec, 0x16, 0xb4, 0x47, 0xdd, 0x82, 0xf6, 0x4f, 0xb7, 0xa0, 0x7d, 0xfb, 0x6f, 0x61,
- 0xea, 0xa3, 0xfc, 0xa8, 0x5f, 0x4b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x24, 0x03, 0xec, 0x04,
- 0x48, 0x15, 0x00, 0x00,
+ // 1884 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcd, 0x8f, 0x1b, 0x49,
+ 0x15, 0x9f, 0xf6, 0x8c, 0x67, 0xec, 0xe7, 0xf9, 0xc8, 0x14, 0x59, 0x61, 0x06, 0x61, 0x87, 0x5e,
+ 0xb2, 0x3b, 0x4b, 0x76, 0x6d, 0x32, 0x1b, 0x21, 0xb8, 0x00, 0xdb, 0x93, 0x6c, 0xe2, 0xcd, 0xc4,
+ 0xb1, 0xca, 0x56, 0x10, 0x88, 0x8f, 0xed, 0x69, 0xd7, 0x78, 0x7a, 0xa7, 0xdd, 0xd5, 0xaa, 0x2e,
+ 0x87, 0x44, 0x42, 0x88, 0x0b, 0x07, 0x6e, 0xf0, 0x27, 0x20, 0xfe, 0x02, 0x04, 0xd2, 0xae, 0xb4,
+ 0x82, 0x85, 0x0b, 0xca, 0x71, 0x25, 0x2e, 0x7b, 0xc1, 0x22, 0xe6, 0xbf, 0xc8, 0x09, 0xd5, 0x47,
+ 0x7f, 0xd9, 0xee, 0xb1, 0x89, 0x22, 0x9f, 0xc6, 0xfd, 0xde, 0xab, 0xdf, 0x7b, 0xf5, 0xea, 0x7d,
+ 0x55, 0x0d, 0x1c, 0x5e, 0x7c, 0x27, 0x6c, 0xb8, 0xb4, 0x69, 0x07, 0x6e, 0xd3, 0x27, 0xfc, 0x17,
+ 0x94, 0x5d, 0xb8, 0xfe, 0xa0, 0xf9, 0xf8, 0x66, 0x73, 0x40, 0x7c, 0xc2, 0x6c, 0x4e, 0xfa, 0x8d,
+ 0x80, 0x51, 0x4e, 0x51, 0x55, 0x49, 0x36, 0xec, 0xc0, 0x6d, 0x24, 0x92, 0x8d, 0xc7, 0x37, 0x0f,
+ 0xde, 0x19, 0xb8, 0xfc, 0x7c, 0x74, 0xda, 0x70, 0xe8, 0xb0, 0x39, 0xa0, 0x03, 0xda, 0x94, 0x0b,
+ 0x4e, 0x47, 0x67, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x0a, 0xe8, 0xc0, 0x4c, 0xa9, 0x74, 0x28, 0x23,
+ 0x73, 0x94, 0x1d, 0xdc, 0x4a, 0x64, 0x86, 0xb6, 0x73, 0xee, 0xfa, 0x84, 0x3d, 0x6d, 0x06, 0x17,
+ 0x03, 0x41, 0x08, 0x9b, 0x43, 0xc2, 0xed, 0x79, 0xab, 0x9a, 0x79, 0xab, 0xd8, 0xc8, 0xe7, 0xee,
+ 0x90, 0xcc, 0x2c, 0xf8, 0xf6, 0xa2, 0x05, 0xa1, 0x73, 0x4e, 0x86, 0xf6, 0xcc, 0xba, 0x77, 0xf3,
+ 0xd6, 0x8d, 0xb8, 0xeb, 0x35, 0x5d, 0x9f, 0x87, 0x9c, 0x4d, 0x2f, 0x32, 0xff, 0x66, 0xc0, 0xde,
+ 0xbd, 0x5e, 0xaf, 0xd3, 0xf2, 0x07, 0x8c, 0x84, 0x61, 0xc7, 0xe6, 0xe7, 0xe8, 0x1a, 0x6c, 0x04,
+ 0x36, 0x3f, 0xaf, 0x1a, 0xd7, 0x8c, 0xc3, 0xb2, 0xb5, 0xfd, 0x6c, 0x5c, 0x5f, 0x9b, 0x8c, 0xeb,
+ 0x1b, 0x82, 0x87, 0x25, 0x07, 0xdd, 0x82, 0x92, 0xf8, 0xdb, 0x7b, 0x1a, 0x90, 0xea, 0xba, 0x94,
+ 0xaa, 0x4e, 0xc6, 0xf5, 0x52, 0x47, 0xd3, 0x5e, 0xa4, 0x7e, 0xe3, 0x58, 0x12, 0x75, 0x61, 0xeb,
+ 0xd4, 0x76, 0x2e, 0x88, 0xdf, 0xaf, 0x16, 0xae, 0x19, 0x87, 0x95, 0xa3, 0xc3, 0x46, 0xde, 0xf1,
+ 0x35, 0xb4, 0x3d, 0x96, 0x92, 0xb7, 0xf6, 0xb4, 0x11, 0x5b, 0x9a, 0x80, 0x23, 0x24, 0xf3, 0x0c,
+ 0xae, 0xa6, 0xec, 0xc7, 0x23, 0x8f, 0x3c, 0xb2, 0xbd, 0x11, 0x41, 0x6d, 0x28, 0x0a, 0xc5, 0x61,
+ 0xd5, 0xb8, 0xb6, 0x7e, 0x58, 0x39, 0x7a, 0x2b, 0x5f, 0xd5, 0xd4, 0xf6, 0xad, 0x1d, 0xad, 0xab,
+ 0x28, 0xbe, 0x42, 0xac, 0x60, 0xcc, 0x4f, 0x0c, 0x28, 0xb7, 0x3a, 0xef, 0xf5, 0xfb, 0x42, 0x0e,
+ 0x7d, 0x08, 0x25, 0x71, 0xde, 0x7d, 0x9b, 0xdb, 0xd2, 0x4d, 0x95, 0xa3, 0x6f, 0xa5, 0x14, 0xc4,
+ 0xee, 0x6f, 0x04, 0x17, 0x03, 0x41, 0x08, 0x1b, 0x42, 0x5a, 0x28, 0x7b, 0x78, 0xfa, 0x11, 0x71,
+ 0xf8, 0x03, 0xc2, 0x6d, 0x0b, 0x69, 0x3d, 0x90, 0xd0, 0x70, 0x8c, 0x8a, 0x5a, 0xb0, 0x11, 0x06,
+ 0xc4, 0xd1, 0x9e, 0x7a, 0xf3, 0x12, 0x4f, 0x45, 0x46, 0x75, 0x03, 0xe2, 0x24, 0xa7, 0x25, 0xbe,
+ 0xb0, 0x84, 0x30, 0x3f, 0x36, 0x60, 0x27, 0x96, 0x3a, 0x71, 0x43, 0x8e, 0x7e, 0x32, 0x63, 0x7e,
+ 0x63, 0x39, 0xf3, 0xc5, 0x6a, 0x69, 0xfc, 0x15, 0xad, 0xa7, 0x14, 0x51, 0x52, 0xa6, 0xdf, 0x83,
+ 0xa2, 0xcb, 0xc9, 0x30, 0xac, 0x16, 0xa4, 0xeb, 0x5f, 0x5f, 0xc2, 0xf6, 0xc4, 0xe9, 0x2d, 0xb1,
+ 0x12, 0x2b, 0x00, 0x73, 0x90, 0x32, 0x5c, 0x6c, 0x08, 0x3d, 0x82, 0x72, 0x60, 0x33, 0xe2, 0x73,
+ 0x4c, 0xce, 0xb4, 0xe5, 0x97, 0x9c, 0x6c, 0x27, 0x12, 0x25, 0x8c, 0xf8, 0x0e, 0xb1, 0x76, 0x26,
+ 0xe3, 0x7a, 0x39, 0x26, 0xe2, 0x04, 0xca, 0x7c, 0x08, 0x5b, 0xad, 0x8e, 0xe5, 0x51, 0xe7, 0x42,
+ 0x44, 0xbf, 0xe3, 0xf6, 0xd9, 0x74, 0xf4, 0x1f, 0xb7, 0x6e, 0x63, 0x2c, 0x39, 0xc8, 0x84, 0x4d,
+ 0xf2, 0xc4, 0x21, 0x01, 0x97, 0x1b, 0x2c, 0x5b, 0x30, 0x19, 0xd7, 0x37, 0xef, 0x48, 0x0a, 0xd6,
+ 0x1c, 0xf3, 0x37, 0x05, 0xd8, 0xd2, 0x41, 0xb5, 0x82, 0x60, 0xb9, 0x9b, 0x09, 0x96, 0xeb, 0x0b,
+ 0xd3, 0x2a, 0x2f, 0x54, 0xd0, 0x43, 0xd8, 0x0c, 0xb9, 0xcd, 0x47, 0xa1, 0x4c, 0xeb, 0xcb, 0xe3,
+ 0x4e, 0x43, 0x49, 0x71, 0x6b, 0x57, 0x83, 0x6d, 0xaa, 0x6f, 0xac, 0x61, 0xcc, 0x7f, 0x18, 0xb0,
+ 0x9b, 0xcd, 0x65, 0xf4, 0x08, 0xb6, 0x42, 0xc2, 0x1e, 0xbb, 0x0e, 0xa9, 0x6e, 0x48, 0x25, 0xcd,
+ 0xc5, 0x4a, 0x94, 0x7c, 0x54, 0x0d, 0x2a, 0xa2, 0x12, 0x68, 0x1a, 0x8e, 0xc0, 0xd0, 0x0f, 0xa1,
+ 0xc4, 0x48, 0x48, 0x47, 0xcc, 0x21, 0xda, 0xfa, 0x77, 0xd2, 0xc0, 0xa2, 0xaa, 0x0b, 0x48, 0x51,
+ 0x8a, 0xfa, 0x27, 0xd4, 0xb1, 0x3d, 0xe5, 0xca, 0x24, 0x3c, 0xb6, 0x45, 0x3c, 0x63, 0x0d, 0x81,
+ 0x63, 0x30, 0x51, 0x23, 0xb7, 0xb5, 0x21, 0xc7, 0x9e, 0xbd, 0x92, 0x03, 0x3d, 0xc9, 0x1c, 0xe8,
+ 0x37, 0x17, 0x3a, 0x48, 0xda, 0x95, 0x5b, 0x00, 0xfe, 0x6a, 0xc0, 0x95, 0xb4, 0xe0, 0x0a, 0x6a,
+ 0xc0, 0xfd, 0x6c, 0x0d, 0x78, 0x63, 0xb9, 0x1d, 0xe4, 0x94, 0x81, 0x7f, 0x1b, 0x50, 0x4f, 0x8b,
+ 0x75, 0x6c, 0x66, 0x0f, 0x09, 0x27, 0x2c, 0x8c, 0x0f, 0x0f, 0x1d, 0x42, 0xc9, 0xee, 0xb4, 0xee,
+ 0x32, 0x3a, 0x0a, 0xa2, 0xd4, 0x15, 0xa6, 0xbd, 0xa7, 0x69, 0x38, 0xe6, 0x8a, 0x04, 0xbf, 0x70,
+ 0x75, 0x0f, 0x4a, 0x25, 0xf8, 0x7d, 0xd7, 0xef, 0x63, 0xc9, 0x11, 0x12, 0xbe, 0x3d, 0x8c, 0x5a,
+ 0x5b, 0x2c, 0xd1, 0xb6, 0x87, 0x04, 0x4b, 0x0e, 0xaa, 0x43, 0x31, 0x74, 0x68, 0xa0, 0x22, 0xb8,
+ 0x6c, 0x95, 0x85, 0xc9, 0x5d, 0x41, 0xc0, 0x8a, 0x8e, 0x6e, 0x40, 0x59, 0x08, 0x86, 0x81, 0xed,
+ 0x90, 0x6a, 0x51, 0x0a, 0xc9, 0xea, 0xd3, 0x8e, 0x88, 0x38, 0xe1, 0x9b, 0x7f, 0x9a, 0x3a, 0x1f,
+ 0x59, 0xea, 0x8e, 0x00, 0x1c, 0xea, 0x73, 0x46, 0x3d, 0x8f, 0x44, 0xd5, 0x28, 0x0e, 0x9a, 0xe3,
+ 0x98, 0x83, 0x53, 0x52, 0xc8, 0x05, 0x08, 0x62, 0xdf, 0xe8, 0xe0, 0xf9, 0xee, 0x72, 0xae, 0x9f,
+ 0xe3, 0x53, 0x6b, 0x57, 0xa8, 0x4a, 0x31, 0x52, 0xe0, 0xe6, 0x9f, 0x0d, 0xa8, 0xe8, 0xf5, 0x2b,
+ 0x08, 0xa7, 0xf7, 0xb3, 0xe1, 0xf4, 0xf5, 0xc5, 0x83, 0xc3, 0xfc, 0x48, 0xfa, 0xc4, 0x80, 0x83,
+ 0xc8, 0x6a, 0x6a, 0xf7, 0x2d, 0xdb, 0xb3, 0x7d, 0x87, 0xb0, 0xa8, 0x52, 0x1f, 0x40, 0xc1, 0x8d,
+ 0xc2, 0x07, 0x34, 0x40, 0xa1, 0xd5, 0xc1, 0x05, 0x37, 0x40, 0x6f, 0x43, 0xe9, 0x9c, 0x86, 0x5c,
+ 0x06, 0x86, 0x0a, 0x9d, 0xd8, 0xe0, 0x7b, 0x9a, 0x8e, 0x63, 0x09, 0xd4, 0x81, 0x62, 0x40, 0x19,
+ 0x0f, 0xab, 0x1b, 0xd2, 0xe0, 0x1b, 0x0b, 0x0d, 0xee, 0x50, 0xc6, 0x75, 0x2d, 0x4d, 0x06, 0x10,
+ 0x81, 0x80, 0x15, 0x90, 0xf9, 0x4b, 0xf8, 0xca, 0x1c, 0xcb, 0xd5, 0x12, 0xf4, 0x73, 0xd8, 0x72,
+ 0x15, 0x53, 0xcf, 0x3b, 0xb7, 0x16, 0x2a, 0x9c, 0xb3, 0xff, 0x64, 0xcc, 0x8a, 0xc6, 0xa9, 0x08,
+ 0xd5, 0xfc, 0xa3, 0x01, 0xfb, 0x33, 0x96, 0xca, 0x49, 0x91, 0x32, 0x2e, 0x3d, 0x56, 0x4c, 0x4d,
+ 0x8a, 0x94, 0x71, 0x2c, 0x39, 0xe8, 0x3e, 0x94, 0xe4, 0xa0, 0xe9, 0x50, 0x4f, 0x7b, 0xad, 0x19,
+ 0x79, 0xad, 0xa3, 0xe9, 0x2f, 0xc6, 0xf5, 0xaf, 0xce, 0x4e, 0xdf, 0x8d, 0x88, 0x8d, 0x63, 0x00,
+ 0x91, 0x75, 0x84, 0x31, 0xca, 0x74, 0x62, 0xca, 0xac, 0xbb, 0x23, 0x08, 0x58, 0xd1, 0xcd, 0x3f,
+ 0x24, 0x41, 0x29, 0x26, 0x41, 0x61, 0x9f, 0x38, 0x91, 0xe9, 0x5e, 0x2e, 0xce, 0x0b, 0x4b, 0x0e,
+ 0x0a, 0xe0, 0x8a, 0x3b, 0x35, 0x3a, 0x2e, 0x5d, 0x74, 0xe3, 0x15, 0x56, 0x55, 0x23, 0x5f, 0x99,
+ 0xe6, 0xe0, 0x19, 0x74, 0xf3, 0x43, 0x98, 0x91, 0x12, 0xe5, 0xfe, 0x9c, 0xf3, 0x60, 0x4e, 0xe2,
+ 0xe4, 0xcf, 0xaa, 0x89, 0xf6, 0x92, 0xdc, 0x53, 0xaf, 0xd7, 0xc1, 0x12, 0xc5, 0xfc, 0xad, 0x01,
+ 0xaf, 0xcd, 0x6d, 0x9c, 0x71, 0x61, 0x33, 0x72, 0x0b, 0x5b, 0x5b, 0x9f, 0xa8, 0xf2, 0xc1, 0xdb,
+ 0xf9, 0x96, 0x64, 0x91, 0xc5, 0x89, 0xcf, 0x3b, 0x7f, 0xf3, 0x9f, 0x85, 0xf8, 0x44, 0x64, 0x55,
+ 0xfb, 0x41, 0xec, 0x6f, 0x59, 0x75, 0x84, 0x66, 0x5d, 0x43, 0xaf, 0xa6, 0xfc, 0x17, 0xf3, 0xf0,
+ 0x8c, 0x34, 0xea, 0xc3, 0x6e, 0x9f, 0x9c, 0xd9, 0x23, 0x8f, 0x6b, 0xdd, 0xda, 0x6b, 0xcb, 0x5f,
+ 0x26, 0xd0, 0x64, 0x5c, 0xdf, 0xbd, 0x9d, 0xc1, 0xc0, 0x53, 0x98, 0xe8, 0x18, 0xd6, 0xb9, 0x17,
+ 0x95, 0x9b, 0x6f, 0x2c, 0x84, 0xee, 0x9d, 0x74, 0xad, 0x8a, 0xde, 0xfe, 0x7a, 0xef, 0xa4, 0x8b,
+ 0xc5, 0x6a, 0xf4, 0x01, 0x14, 0xd9, 0xc8, 0x23, 0x62, 0x98, 0x5a, 0x5f, 0x6a, 0x2e, 0x13, 0x67,
+ 0x9a, 0xa4, 0xbf, 0xf8, 0x0a, 0xb1, 0x82, 0x30, 0x7f, 0x05, 0x3b, 0x99, 0x89, 0x0b, 0x0d, 0x61,
+ 0xdb, 0x4b, 0xa5, 0xb0, 0xf6, 0xc2, 0xbb, 0xff, 0x57, 0xde, 0xeb, 0x82, 0x73, 0x55, 0x6b, 0xdc,
+ 0x4e, 0xf3, 0x70, 0x06, 0xde, 0xb4, 0x01, 0x92, 0xbd, 0x8a, 0x4c, 0x14, 0xe9, 0xa3, 0xaa, 0x8d,
+ 0xce, 0x44, 0x91, 0x55, 0x21, 0x56, 0x74, 0xd1, 0xbd, 0x42, 0xe2, 0x30, 0xc2, 0xdb, 0x49, 0xbd,
+ 0x8c, 0xbb, 0x57, 0x37, 0xe6, 0xe0, 0x94, 0x94, 0xf9, 0x77, 0x03, 0x76, 0xda, 0xca, 0xe4, 0x0e,
+ 0xf5, 0x5c, 0xe7, 0xe9, 0x0a, 0x06, 0xad, 0x07, 0x99, 0x41, 0xeb, 0x92, 0x32, 0x9d, 0x31, 0x2c,
+ 0x77, 0xd2, 0xfa, 0x8b, 0x01, 0x5f, 0xce, 0x48, 0xde, 0x49, 0x8a, 0x51, 0xdc, 0x12, 0x8c, 0x45,
+ 0x2d, 0x21, 0x83, 0x20, 0x53, 0x6b, 0x6e, 0x4b, 0x40, 0x77, 0xa1, 0xc0, 0xa9, 0x8e, 0xd1, 0xa5,
+ 0xe1, 0x08, 0x61, 0x49, 0x6f, 0xeb, 0x51, 0x5c, 0xe0, 0xd4, 0xfc, 0xd4, 0x80, 0x6a, 0x46, 0x2a,
+ 0x5d, 0x44, 0x5f, 0xbd, 0xdd, 0x0f, 0x60, 0xe3, 0x8c, 0xd1, 0xe1, 0xcb, 0x58, 0x1e, 0x3b, 0xfd,
+ 0x7d, 0x46, 0x87, 0x58, 0xc2, 0x98, 0x9f, 0x19, 0xb0, 0x9f, 0x91, 0x5c, 0xc1, 0x40, 0x72, 0x92,
+ 0x1d, 0x48, 0xde, 0x5c, 0x72, 0x0f, 0x39, 0x63, 0xc9, 0x67, 0x85, 0xa9, 0x1d, 0x88, 0xbd, 0xa2,
+ 0x33, 0xa8, 0x04, 0xb4, 0xdf, 0x25, 0x1e, 0x71, 0x38, 0x9d, 0x97, 0xe0, 0x97, 0x6d, 0xc2, 0x3e,
+ 0x25, 0x5e, 0xb4, 0xd4, 0xda, 0x9b, 0x8c, 0xeb, 0x95, 0x4e, 0x82, 0x85, 0xd3, 0xc0, 0xe8, 0x09,
+ 0xec, 0xc7, 0xb3, 0x68, 0xac, 0xad, 0xf0, 0xf2, 0xda, 0x5e, 0x9b, 0x8c, 0xeb, 0xfb, 0xed, 0x69,
+ 0x44, 0x3c, 0xab, 0x04, 0xdd, 0x83, 0x2d, 0x37, 0x90, 0xd7, 0x6e, 0x7d, 0x63, 0xbb, 0x6c, 0xb0,
+ 0x53, 0xf7, 0x73, 0x75, 0xf9, 0xd3, 0x1f, 0x38, 0x5a, 0x6e, 0xfe, 0x6b, 0x3a, 0x06, 0x44, 0xc0,
+ 0xa1, 0xbb, 0xa9, 0xe9, 0x43, 0xf5, 0xbc, 0x1b, 0x2f, 0x37, 0x79, 0x64, 0xdb, 0x62, 0x7e, 0x11,
+ 0x1a, 0x71, 0xd7, 0x6b, 0xa8, 0xa7, 0xb6, 0x46, 0xcb, 0xe7, 0x0f, 0x59, 0x97, 0x33, 0xd7, 0x1f,
+ 0xa8, 0x16, 0x9d, 0x1a, 0x8b, 0xae, 0xc3, 0x96, 0xee, 0x9a, 0x72, 0xe3, 0x45, 0xb5, 0xab, 0x3b,
+ 0x8a, 0x84, 0x23, 0x9e, 0xf9, 0x62, 0x3a, 0x2e, 0x64, 0x0f, 0xfd, 0xe8, 0x95, 0xc5, 0xc5, 0x97,
+ 0x74, 0x34, 0xe6, 0xc7, 0xc6, 0x4f, 0x93, 0xc1, 0x52, 0x45, 0xfa, 0xd1, 0x92, 0x91, 0x9e, 0xee,
+ 0x68, 0xb9, 0x63, 0x25, 0xfa, 0x11, 0x6c, 0x12, 0x85, 0xae, 0x5a, 0xe4, 0xcd, 0x25, 0xd1, 0x93,
+ 0xb2, 0x9a, 0xbc, 0x3c, 0x68, 0x9a, 0x06, 0x44, 0xdf, 0x17, 0x5e, 0x12, 0xb2, 0xe2, 0xc2, 0xaf,
+ 0xe6, 0xf0, 0xb2, 0xf5, 0x35, 0xb5, 0xd9, 0x98, 0xfc, 0x42, 0x5c, 0x70, 0xe2, 0x4f, 0x9c, 0x5e,
+ 0x61, 0x7e, 0x6c, 0xc0, 0xde, 0xd4, 0x0b, 0x12, 0x7a, 0x1d, 0x8a, 0x83, 0xd4, 0x15, 0x33, 0xce,
+ 0x66, 0x75, 0xc7, 0x54, 0x3c, 0x71, 0x53, 0x88, 0x1f, 0x22, 0xa6, 0x6e, 0x0a, 0xb3, 0xaf, 0x0b,
+ 0xa8, 0x99, 0xbe, 0x29, 0xaa, 0xc1, 0x76, 0x5f, 0x8b, 0xcf, 0xbd, 0x2d, 0xc6, 0x43, 0xdc, 0x46,
+ 0xde, 0x10, 0x67, 0xfe, 0x0c, 0xd0, 0xec, 0x78, 0xb6, 0xc4, 0xf0, 0xf7, 0x06, 0x6c, 0xfa, 0xa3,
+ 0xe1, 0x29, 0x51, 0xd9, 0x5f, 0x4c, 0x5c, 0xdb, 0x96, 0x54, 0xac, 0xb9, 0xe6, 0xef, 0x0b, 0x50,
+ 0xd1, 0x0a, 0x8e, 0x5b, 0xb7, 0xf1, 0x0a, 0xda, 0xf4, 0xfd, 0x4c, 0x9b, 0x7e, 0x6b, 0xe1, 0x58,
+ 0x2a, 0xcc, 0xca, 0x7d, 0xe4, 0xea, 0x4e, 0x3d, 0x72, 0xdd, 0x58, 0x0e, 0xee, 0xf2, 0x87, 0xae,
+ 0x4f, 0x0d, 0xd8, 0x4b, 0x49, 0xaf, 0xa0, 0x05, 0x7d, 0x90, 0x6d, 0x41, 0xd7, 0x97, 0xda, 0x45,
+ 0x4e, 0x03, 0x3a, 0xca, 0x18, 0x2f, 0xab, 0x4c, 0x1d, 0x8a, 0x8e, 0xdb, 0x67, 0x99, 0x11, 0x4f,
+ 0x30, 0x43, 0xac, 0xe8, 0xe6, 0x13, 0xd8, 0x9f, 0x71, 0x0f, 0x72, 0xe4, 0xab, 0x45, 0xdf, 0xe5,
+ 0x2e, 0xf5, 0xa3, 0x89, 0xa1, 0xb9, 0xdc, 0xa6, 0x8f, 0xa3, 0x75, 0x99, 0x67, 0x0e, 0x0d, 0x85,
+ 0x53, 0xb0, 0xd6, 0xf7, 0x9e, 0x3d, 0xaf, 0xad, 0x7d, 0xfe, 0xbc, 0xb6, 0xf6, 0xc5, 0xf3, 0xda,
+ 0xda, 0xaf, 0x27, 0x35, 0xe3, 0xd9, 0xa4, 0x66, 0x7c, 0x3e, 0xa9, 0x19, 0x5f, 0x4c, 0x6a, 0xc6,
+ 0x7f, 0x26, 0x35, 0xe3, 0x77, 0xff, 0xad, 0xad, 0xfd, 0xb8, 0x9a, 0xf7, 0x5f, 0xa4, 0xff, 0x05,
+ 0x00, 0x00, 0xff, 0xff, 0xb5, 0x6b, 0x8c, 0x52, 0x60, 0x1a, 0x00, 0x00,
}
func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
@@ -1028,7 +1274,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *IPBlock) Marshal() (dAtA []byte, err error) {
+func (m *IPAddress) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1038,34 +1284,40 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) {
+func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Except) > 0 {
- for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Except[iNdEx])
- copy(dAtA[i:], m.Except[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx])))
- i--
- dAtA[i] = 0x12
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i -= len(m.CIDR)
- copy(dAtA[i:], m.CIDR)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *Ingress) Marshal() (dAtA []byte, err error) {
+func (m *IPAddressList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1075,38 +1327,32 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
+func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i--
- dAtA[i] = 0x12
{
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1118,7 +1364,7 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
+func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1128,19 +1374,19 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) {
+func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if m.Service != nil {
+ if m.ParentRef != nil {
{
- size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1148,15 +1394,140 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x22
+ dAtA[i] = 0xa
}
- if m.Resource != nil {
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
+ return len(dAtA) - i, nil
+}
+
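+// Marshal allocates a buffer of exactly Size() bytes and fills it via
+// MarshalToSizedBuffer, which writes fields from the end of the buffer
+// backwards so no second sizing pass is needed.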
+func (m *IPBlock) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Except) > 0 {
+ for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Except[iNdEx])
+ copy(dAtA[i:], m.Except[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.CIDR)
+ copy(dAtA[i:], m.CIDR)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Ingress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Service != nil {
+ {
+ size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Resource != nil {
+ {
+ size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
@@ -2137,6 +2508,49 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *ParentReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
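+// MarshalToSizedBuffer writes the four string fields back-to-front. Each
+// value is preceded by its length as a varint and a one-byte protobuf tag,
+// (field_number << 3) | wire_type: 0x22 is field 4, 0x1a field 3, 0x12
+// field 2, and 0xa field 1, all with wire type 2 (length-delimited).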
+func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Resource)
+ copy(dAtA[i:], m.Resource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Group)
+ copy(dAtA[i:], m.Group)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *ServiceBackendPort) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2168,72 +2582,284 @@ func (m *ServiceBackendPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
+func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- dAtA[offset] = uint8(v)
- return base
+ return dAtA[:n], nil
}
-func (m *HTTPIngressPath) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Path)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Backend.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if m.PathType != nil {
- l = len(*m.PathType)
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
+
+func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *HTTPIngressRuleValue) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Paths) > 0 {
- for _, e := range m.Paths {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
-}
-
-func (m *IPBlock) Size() (n int) {
- if m == nil {
- return 0
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- var l int
- _ = l
- l = len(m.CIDR)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Except) > 0 {
- for _, s := range m.Except {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *Ingress) Size() (n int) {
- if m == nil {
- return 0
+func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
+ return dAtA[:n], nil
+}
+
+func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.CIDRs) > 0 {
+ for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.CIDRs[iNdEx])
+ copy(dAtA[i:], m.CIDRs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
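+// encodeVarintGenerated steps offset back by the encoded width of v, writes
+// v in place as a base-128 varint, and returns the new start offset.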
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *HTTPIngressPath) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Backend.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.PathType != nil {
+ l = len(*m.PathType)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *HTTPIngressRuleValue) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for _, e := range m.Paths {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
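+// Size reports the encoded byte length of an IPAddress; each "1 +" accounts
+// for the one-byte tag that precedes a length-delimited field.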
+func (m *IPAddress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IPAddressList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IPAddressSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ParentRef != nil {
+ l = m.ParentRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IPBlock) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.CIDR)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Except) > 0 {
+ for _, s := range m.Except {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Ingress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
@@ -2635,6 +3261,23 @@ func (m *NetworkPolicySpec) Size() (n int) {
return n
}
+func (m *ParentReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *ServiceBackendPort) Size() (n int) {
if m == nil {
return 0
@@ -2647,39 +3290,138 @@ func (m *ServiceBackendPort) Size() (n int) {
return n
}
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *HTTPIngressPath) String() string {
- if this == nil {
- return "nil"
+func (m *ServiceCIDR) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&HTTPIngressPath{`,
- `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
- `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
- `PathType:` + valueToStringGenerated(this.PathType) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *HTTPIngressRuleValue) String() string {
- if this == nil {
- return "nil"
+
+func (m *ServiceCIDRList) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForPaths := "[]HTTPIngressPath{"
- for _, f := range this.Paths {
- repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + ","
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForPaths += "}"
+ return n
+}
+
+func (m *ServiceCIDRSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.CIDRs) > 0 {
+ for _, s := range m.CIDRs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServiceCIDRStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
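+// sovGenerated is the varint size-of helper: the number of 7-bit groups
+// needed for x (at least one). sozGenerated applies the zigzag transform
+// first, as used for signed sint fields.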
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
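+// The String methods below produce a compact, human-readable rendering of
+// each message; they do not affect the wire encoding.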
+func (this *HTTPIngressPath) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HTTPIngressPath{`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
+ `PathType:` + valueToStringGenerated(this.PathType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HTTPIngressRuleValue) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForPaths := "[]HTTPIngressPath{"
+ for _, f := range this.Paths {
+ repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPaths += "}"
s := strings.Join([]string{`&HTTPIngressRuleValue{`,
`Paths:` + repeatedStringForPaths + `,`,
`}`,
}, "")
return s
}
+func (this *IPAddress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IPAddress{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IPAddressList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]IPAddress{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&IPAddressList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IPAddressSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IPAddressSpec{`,
+ `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *IPBlock) String() string {
if this == nil {
return "nil"
@@ -3018,6 +3760,19 @@ func (this *NetworkPolicySpec) String() string {
}, "")
return s
}
+func (this *ParentReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ParentReference{`,
+ `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *ServiceBackendPort) String() string {
if this == nil {
return "nil"
@@ -3029,6 +3784,59 @@ func (this *ServiceBackendPort) String() string {
}, "")
return s
}
+func (this *ServiceCIDR) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceCIDR{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceCIDRList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ServiceCIDR{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ServiceCIDRList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceCIDRSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceCIDRSpec{`,
+ `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceCIDRStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ServiceCIDRStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -3269,7 +4077,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IPBlock) Unmarshal(dAtA []byte) error {
+func (m *IPAddress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3292,17 +4100,17 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IPBlock: wiretype end group for non-group")
+ return fmt.Errorf("proto: IPAddress: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3312,29 +4120,30 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.CIDR = string(dAtA[iNdEx:postIndex])
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3344,23 +4153,24 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Except = append(m.Except, string(dAtA[iNdEx:postIndex]))
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -3383,7 +4193,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *Ingress) Unmarshal(dAtA []byte) error {
+func (m *IPAddressList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3406,15 +4216,15 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
+ return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3441,46 +4251,13 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3507,7 +4284,8 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Items = append(m.Items, IPAddress{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -3532,7 +4310,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressBackend) Unmarshal(dAtA []byte) error {
+func (m *IPAddressSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3555,51 +4333,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
+ return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Resource == nil {
- m.Resource = &v11.TypedLocalObjectReference{}
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3626,10 +4368,10 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Service == nil {
- m.Service = &IngressServiceBackend{}
+ if m.ParentRef == nil {
+ m.ParentRef = &ParentReference{}
}
- if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -3654,7 +4396,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressClass) Unmarshal(dAtA []byte) error {
+func (m *IPBlock) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3677,17 +4419,17 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
+ return fmt.Errorf("proto: IPBlock: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3697,30 +4439,29 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.CIDR = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3730,24 +4471,23 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Except = append(m.Except, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -3770,7 +4510,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressClassList) Unmarshal(dAtA []byte) error {
+func (m *Ingress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3793,15 +4533,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
+ return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3828,13 +4568,13 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3861,8 +4601,40 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, IngressClass{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -3887,7 +4659,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
+func (m *IngressBackend) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3910,50 +4682,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.APIGroup = &s
- iNdEx = postIndex
- case 2:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3963,61 +4702,33 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Kind = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
+ if m.Resource == nil {
+ m.Resource = &v11.TypedLocalObjectReference{}
}
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4027,57 +4738,27 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.Scope = &s
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
+ if m.Service == nil {
+ m.Service = &IngressServiceBackend{}
}
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- s := string(dAtA[iNdEx:postIndex])
- m.Namespace = &s
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4100,7 +4781,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
+func (m *IngressClass) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4123,17 +4804,17 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4143,27 +4824,28 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Controller = string(dAtA[iNdEx:postIndex])
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4190,10 +4872,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Parameters == nil {
- m.Parameters = &IngressClassParametersReference{}
- }
- if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4218,7 +4897,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressList) Unmarshal(dAtA []byte) error {
+func (m *IngressClassList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4241,10 +4920,10 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -4309,7 +4988,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, Ingress{})
+ m.Items = append(m.Items, IngressClass{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -4335,7 +5014,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
+func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4358,15 +5037,15 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4394,11 +5073,12 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.IP = string(dAtA[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.APIGroup = &s
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4426,13 +5106,45 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Hostname = string(dAtA[iNdEx:postIndex])
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4442,25 +5154,57 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Ports = append(m.Ports, IngressPortStatus{})
- if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ s := string(dAtA[iNdEx:postIndex])
+ m.Scope = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
}
+ s := string(dAtA[iNdEx:postIndex])
+ m.Namespace = &s
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4483,7 +5227,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
+func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4506,15 +5250,47 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Controller = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4541,8 +5317,10 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{})
- if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.Parameters == nil {
+ m.Parameters = &IngressClassParametersReference{}
+ }
+ if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4567,7 +5345,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
+func (m *IngressList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4590,36 +5368,17 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
- }
- m.Port = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Port |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4629,29 +5388,30 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 3:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4661,24 +5421,25 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.Error = &s
+ m.Items = append(m.Items, Ingress{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4701,7 +5462,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressRule) Unmarshal(dAtA []byte) error {
+func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4724,15 +5485,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4760,11 +5521,43 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Host = string(dAtA[iNdEx:postIndex])
+ m.IP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4791,7 +5584,8 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Ports = append(m.Ports, IngressPortStatus{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4816,7 +5610,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
+func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4839,15 +5633,15 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4874,10 +5668,8 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.HTTP == nil {
- m.HTTP = &HTTPIngressRuleValue{}
- }
- if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{})
+ if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4902,7 +5694,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
+func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4925,15 +5717,34 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressServiceBackend: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressServiceBackend: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ m.Port = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Port |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4961,13 +5772,13 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4977,24 +5788,24 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Error = &s
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5017,7 +5828,7 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressSpec) Unmarshal(dAtA []byte) error {
+func (m *IngressRule) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5040,17 +5851,17 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DefaultBackend", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5060,65 +5871,27 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.DefaultBackend == nil {
- m.DefaultBackend = &IngressBackend{}
- }
- if err := m.DefaultBackend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Host = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TLS = append(m.TLS, IngressTLS{})
- if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5145,44 +5918,10 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Rules = append(m.Rules, IngressRule{})
- if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.IngressClassName = &s
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -5204,7 +5943,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressStatus) Unmarshal(dAtA []byte) error {
+func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5227,15 +5966,15 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5262,7 +6001,10 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.HTTP == nil {
+ m.HTTP = &HTTPIngressRuleValue{}
+ }
+ if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5287,7 +6029,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngressTLS) Unmarshal(dAtA []byte) error {
+func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5310,15 +6052,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressServiceBackend: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressServiceBackend: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5346,13 +6088,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5362,23 +6104,24 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.SecretName = string(dAtA[iNdEx:postIndex])
+ if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5401,7 +6144,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
+func (m *IngressSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5424,15 +6167,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DefaultBackend", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5459,13 +6202,16 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.DefaultBackend == nil {
+ m.DefaultBackend = &IngressBackend{}
+ }
+ if err := m.DefaultBackend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5492,63 +6238,14 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.TLS = append(m.TLS, IngressTLS{})
+ if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5575,16 +6272,99 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Ports = append(m.Ports, NetworkPolicyPort{})
- if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Rules = append(m.Rules, IngressRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 2:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
}
- var msglen int
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.IngressClassName = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
+ }
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5609,8 +6389,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.To = append(m.To, NetworkPolicyPeer{})
- if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5635,7 +6414,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
+func (m *IngressTLS) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5658,15 +6437,129 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group")
+ return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5693,14 +6586,13 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Ports = append(m.Ports, NetworkPolicyPort{})
- if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5727,10 +6619,1020 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.From = append(m.From, NetworkPolicyPeer{})
- if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
return err
}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, NetworkPolicyPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.To = append(m.To, NetworkPolicyPeer{})
+ if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, NetworkPolicyPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.From = append(m.From, NetworkPolicyPeer{})
+ if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, NetworkPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodSelector == nil {
+ m.PodSelector = &v1.LabelSelector{}
+ }
+ if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceSelector == nil {
+ m.NamespaceSelector = &v1.LabelSelector{}
+ }
+ if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IPBlock == nil {
+ m.IPBlock = &IPBlock{}
+ }
+ if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
+ m.Protocol = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Port == nil {
+ m.Port = &intstr.IntOrString{}
+ }
+ if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.EndPort = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
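// Editor's note (sketch, not generated code): the repeated postIndex checks
// above defend every length-delimited field on two fronts: a hostile msglen
// close to MaxInt makes iNdEx+msglen wrap negative, which the postIndex < 0
// branch catches before dAtA is sliced, while postIndex > l catches plainly
// truncated input. Condensed into one hypothetical helper:
func checkBounds(iNdEx, msglen, l int) error {
	postIndex := iNdEx + msglen
	if postIndex < 0 { // integer overflow from an absurd length prefix
		return ErrInvalidLengthGenerated
	}
	if postIndex > l { // length prefix runs past the end of the buffer
		return io.ErrUnexpectedEOF
	}
	return nil
}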
+func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{})
+ if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Egress = append(m.Egress, NetworkPolicyEgressRule{})
+ if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ParentReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ParentReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5753,7 +7655,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
+func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5776,17 +7678,17 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group")
+ return fmt.Errorf("proto: ServiceBackendPort: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ServiceBackendPort: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5796,30 +7698,29 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType)
}
- var msglen int
+ m.Number = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5829,26 +7730,11 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ m.Number |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, NetworkPolicy{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -5870,7 +7756,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
+func (m *ServiceCIDR) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5893,15 +7779,15 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group")
+ return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5928,16 +7814,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.PodSelector == nil {
- m.PodSelector = &v1.LabelSelector{}
- }
- if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5964,16 +7847,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NamespaceSelector == nil {
- m.NamespaceSelector = &v1.LabelSelector{}
- }
- if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6000,10 +7880,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.IPBlock == nil {
- m.IPBlock = &IPBlock{}
- }
- if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -6028,7 +7905,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
+func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6051,17 +7928,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group")
+ return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6071,28 +7948,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
- m.Protocol = &s
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6119,33 +7996,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Port == nil {
- m.Port = &intstr.IntOrString{}
- }
- if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Items = append(m.Items, ServiceCIDR{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.EndPort = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6167,7 +8022,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
+func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6190,116 +8045,15 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{})
- if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Egress = append(m.Egress, NetworkPolicyEgressRule{})
- if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6327,7 +8081,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex]))
+ m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -6350,7 +8104,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
+func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6373,17 +8127,17 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ServiceBackendPort: wiretype end group for non-group")
+ return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ServiceBackendPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6393,43 +8147,26 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType)
- }
- m.Number = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Number |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
index c72fdc8f3..e3e3e9215 100644
--- a/vendor/k8s.io/api/networking/v1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1/generated.proto
@@ -72,6 +72,44 @@ message HTTPIngressRuleValue {
repeated HTTPIngressPath paths = 1;
}
+// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
+// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
+// An IP address can be represented in different formats; to guarantee the uniqueness of the IP,
+// the name of the object is the IP address in canonical format: four decimal octets separated
+// by dots with no leading zeros for IPv4, and the representation defined by RFC 5952 for IPv6.
+// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
+// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
+message IPAddress {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the desired state of the IPAddress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional IPAddressSpec spec = 2;
+}
+
+// IPAddressList contains a list of IPAddress.
+message IPAddressList {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of IPAddresses.
+ repeated IPAddress items = 2;
+}
+
+// IPAddressSpec describes the attributes of an IPAddress.
+message IPAddressSpec {
+ // ParentRef references the resource that an IPAddress is attached to.
+ // An IPAddress must reference a parent object.
+ // +required
+ optional ParentReference parentRef = 1;
+}
+
// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
// that should not be included within this rule.
@@ -540,6 +578,25 @@ message NetworkPolicySpec {
repeated string policyTypes = 4;
}
+// ParentReference describes a reference to a parent object.
+message ParentReference {
+ // Group is the group of the object being referenced.
+ // +optional
+ optional string group = 1;
+
+ // Resource is the resource of the object being referenced.
+ // +required
+ optional string resource = 2;
+
+ // Namespace is the namespace of the object being referenced.
+ // +optional
+ optional string namespace = 3;
+
+ // Name is the name of the object being referenced.
+ // +required
+ optional string name = 4;
+}
+
// ServiceBackendPort is the service port being referenced.
// +structType=atomic
message ServiceBackendPort {
@@ -554,3 +611,55 @@ message ServiceBackendPort {
optional int32 number = 2;
}
+// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db8::/64).
+// This range is used to allocate ClusterIPs to Service objects.
+message ServiceCIDR {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the desired state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional ServiceCIDRSpec spec = 2;
+
+ // status represents the current state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional ServiceCIDRStatus status = 3;
+}
+
+// ServiceCIDRList contains a list of ServiceCIDR objects.
+message ServiceCIDRList {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of ServiceCIDRs.
+ repeated ServiceCIDR items = 2;
+}
+
+// ServiceCIDRSpec defines the CIDRs the user wants to use for allocating ClusterIPs for Services.
+message ServiceCIDRSpec {
+ // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
+ // from which to assign service cluster IPs. At most two CIDRs are allowed, one per IP family.
+ // This field is immutable.
+ // +optional
+ // +listType=atomic
+ repeated string cidrs = 1;
+}
+
+// ServiceCIDRStatus describes the current state of the ServiceCIDR.
+message ServiceCIDRStatus {
+ // conditions holds an array of metav1.Condition values that describe the current state of the ServiceCIDR.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+}
+
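The new ServiceCIDRSpec message above caps cidrs at two entries, at most one per IP family. A hedged sketch of what client-side validation of that rule could look like (illustrative only; the apiserver performs the authoritative validation):

package main

import (
	"fmt"
	"net/netip"
)

// validateServiceCIDRs mirrors the documented constraint on
// ServiceCIDRSpec.cidrs: at most two CIDRs, at most one per IP family.
func validateServiceCIDRs(cidrs []string) error {
	if len(cidrs) > 2 {
		return fmt.Errorf("at most two CIDRs allowed, got %d", len(cidrs))
	}
	seen := map[bool]bool{} // keyed by "is IPv4"
	for _, c := range cidrs {
		p, err := netip.ParsePrefix(c)
		if err != nil {
			return fmt.Errorf("invalid CIDR %q: %w", c, err)
		}
		if seen[p.Addr().Is4()] {
			return fmt.Errorf("more than one CIDR of the same IP family: %q", c)
		}
		seen[p.Addr().Is4()] = true
	}
	return nil
}

func main() {
	fmt.Println(validateServiceCIDRs([]string{"192.168.0.0/24", "2001:db8::/64"})) // <nil>
}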
diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go
index a200d5437..b9bdcb78c 100644
--- a/vendor/k8s.io/api/networking/v1/register.go
+++ b/vendor/k8s.io/api/networking/v1/register.go
@@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&IngressClassList{},
&NetworkPolicy{},
&NetworkPolicyList{},
+ &IPAddress{},
+ &IPAddressList{},
+ &ServiceCIDR{},
+ &ServiceCIDRList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
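With the four kinds added to addKnownTypes, a runtime.Scheme built from this package can instantiate them by GroupVersionKind. A minimal sketch, assuming the standard k8s.io/apimachinery scheme API:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := networkingv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme can now create the newly registered kinds by GVK.
	obj, err := scheme.New(networkingv1.SchemeGroupVersion.WithKind("ServiceCIDR"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // *v1.ServiceCIDR
}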
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
index d75e27558..216647cee 100644
--- a/vendor/k8s.io/api/networking/v1/types.go
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -635,3 +635,133 @@ type IngressClassList struct {
// items is the list of IngressClasses.
Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+
+// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
+// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
+// An IP address can be represented in different formats; to guarantee the uniqueness of the IP,
+// the name of the object is the IP address in canonical format: four decimal octets separated
+// by dots with no leading zeros for IPv4, and the representation defined by RFC 5952 for IPv6.
+// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
+// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
+type IPAddress struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // spec is the desired state of the IPAddress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// IPAddressSpec describes the attributes of an IPAddress.
+type IPAddressSpec struct {
+ // ParentRef references the resource that an IPAddress is attached to.
+ // An IPAddress must reference a parent object.
+ // +required
+ ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"`
+}
+
+// ParentReference describes a reference to a parent object.
+type ParentReference struct {
+ // Group is the group of the object being referenced.
+ // +optional
+ Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
+ // Resource is the resource of the object being referenced.
+ // +required
+ Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"`
+ // Namespace is the namespace of the object being referenced.
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+ // Name is the name of the object being referenced.
+ // +required
+ Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+
+// IPAddressList contains a list of IPAddress.
+type IPAddressList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // items is the list of IPAddresses.
+ Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+
+// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db8::/64).
+// This range is used to allocate ClusterIPs to Service objects.
+type ServiceCIDR struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // spec is the desired state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // status represents the current state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ServiceCIDRSpec defines the CIDRs the user wants to use for allocating ClusterIPs for Services.
+type ServiceCIDRSpec struct {
+ // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
+ // from which to assign service cluster IPs. At most two CIDRs are allowed, one per IP family.
+ // This field is immutable.
+ // +optional
+ // +listType=atomic
+ CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
+}
+
+const (
+ // ServiceCIDRConditionReady represents the status of a ServiceCIDR that is ready to be used by
+ // the apiserver to allocate ClusterIPs for Services.
+ ServiceCIDRConditionReady = "Ready"
+ // ServiceCIDRReasonTerminating represents the reason a ServiceCIDR is not ready: it is
+ // being deleted.
+ ServiceCIDRReasonTerminating = "Terminating"
+)
+
+// ServiceCIDRStatus describes the current state of the ServiceCIDR.
+type ServiceCIDRStatus struct {
+ // conditions holds an array of metav1.Condition values that describe the current state of the ServiceCIDR.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+
+// ServiceCIDRList contains a list of ServiceCIDR objects.
+type ServiceCIDRList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // items is the list of ServiceCIDRs.
+ Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
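To make the naming rule in the IPAddress comment concrete, here is a hedged sketch of a conforming object: the name is the canonical IP, and the required ParentRef points at the consuming resource (the Service name below is hypothetical):

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	addr := networkingv1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{
			// The object name must be the canonical form of the IP:
			// no leading zeros for IPv4, RFC 5952 form for IPv6.
			Name: "192.168.1.5",
		},
		Spec: networkingv1.IPAddressSpec{
			// ParentRef is required: an IPAddress must reference the
			// object it was allocated for.
			ParentRef: &networkingv1.ParentReference{
				Resource:  "services",
				Namespace: "default",
				Name:      "my-service", // hypothetical Service
			},
		},
	}
	fmt.Println(addr.Name)
}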
diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
index ff080540d..0e294848b 100644
--- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
@@ -47,6 +47,35 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string {
return map_HTTPIngressRuleValue
}
+var map_IPAddress = map[string]string{
+ "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats; to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format: four decimal octets separated by dots with no leading zeros for IPv4, and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (IPAddress) SwaggerDoc() map[string]string {
+ return map_IPAddress
+}
+
+var map_IPAddressList = map[string]string{
+ "": "IPAddressList contains a list of IPAddress.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is the list of IPAddresses.",
+}
+
+func (IPAddressList) SwaggerDoc() map[string]string {
+ return map_IPAddressList
+}
+
+var map_IPAddressSpec = map[string]string{
+ "": "IPAddressSpec describes the attributes of an IPAddress.",
+ "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.",
+}
+
+func (IPAddressSpec) SwaggerDoc() map[string]string {
+ return map_IPAddressSpec
+}
+
var map_IPBlock = map[string]string{
"": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
"cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"",
@@ -294,6 +323,18 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string {
return map_NetworkPolicySpec
}
+var map_ParentReference = map[string]string{
+ "": "ParentReference describes a reference to a parent object.",
+ "group": "Group is the group of the object being referenced.",
+ "resource": "Resource is the resource of the object being referenced.",
+ "namespace": "Namespace is the namespace of the object being referenced.",
+ "name": "Name is the name of the object being referenced.",
+}
+
+func (ParentReference) SwaggerDoc() map[string]string {
+ return map_ParentReference
+}
+
var map_ServiceBackendPort = map[string]string{
"": "ServiceBackendPort is the service port being referenced.",
"name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".",
@@ -304,4 +345,43 @@ func (ServiceBackendPort) SwaggerDoc() map[string]string {
return map_ServiceBackendPort
}
+var map_ServiceCIDR = map[string]string{
+ "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db8::/64). This range is used to allocate ClusterIPs to Service objects.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (ServiceCIDR) SwaggerDoc() map[string]string {
+ return map_ServiceCIDR
+}
+
+var map_ServiceCIDRList = map[string]string{
+ "": "ServiceCIDRList contains a list of ServiceCIDR objects.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is the list of ServiceCIDRs.",
+}
+
+func (ServiceCIDRList) SwaggerDoc() map[string]string {
+ return map_ServiceCIDRList
+}
+
+var map_ServiceCIDRSpec = map[string]string{
+ "": "ServiceCIDRSpec defines the CIDRs the user wants to use for allocating ClusterIPs for Services.",
+ "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. At most two CIDRs are allowed, one per IP family. This field is immutable.",
+}
+
+func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
+ return map_ServiceCIDRSpec
+}
+
+var map_ServiceCIDRStatus = map[string]string{
+ "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
+ "conditions": "conditions holds an array of metav1.Condition values that describe the current state of the ServiceCIDR.",
+}
+
+func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
+ return map_ServiceCIDRStatus
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/networking/v1/well_known_labels.go b/vendor/k8s.io/api/networking/v1/well_known_labels.go
new file mode 100644
index 000000000..28e2e8f3f
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/well_known_labels.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+
+ // TODO: Use IPFamily as a field with a field selector, and set the value based on
+ // the name at create time, keeping it immutable.
+ // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress.
+ // This label simplifies dual-stack client operations, allowing clients to obtain the
+ // list of IP addresses filtered by family.
+ LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family"
+ // LabelManagedBy is used to indicate the controller or entity that manages
+ // an IPAddress. This label aims to enable different IPAddress
+ // objects to be managed by different controllers or entities within the
+ // same cluster. It is highly recommended to configure this label for all
+ // IPAddress objects.
+ LabelManagedBy = "ipaddress.kubernetes.io/managed-by"
+)
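These labels exist so clients can filter IPAddress objects by family or by managing controller. A hedged sketch of such a list call, assuming a client-go release that matches these API additions:

package example

import (
	"context"
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listIPv4Addresses lists IPAddress objects labeled as IPv4 using the
// well-known family label defined above.
func listIPv4Addresses(ctx context.Context, client kubernetes.Interface) error {
	opts := metav1.ListOptions{
		LabelSelector: networkingv1.LabelIPAddressFamily + "=IPv4",
	}
	list, err := client.NetworkingV1().IPAddresses().List(ctx, opts)
	if err != nil {
		return err
	}
	for _, addr := range list.Items {
		fmt.Println(addr.Name)
	}
	return nil
}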
diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
index 540873833..9ce6435a4 100644
--- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
@@ -73,6 +73,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAddress) DeepCopyInto(out *IPAddress) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
+func (in *IPAddress) DeepCopy() *IPAddress {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAddress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IPAddress) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]IPAddress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
+func (in *IPAddressList) DeepCopy() *IPAddressList {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAddressList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IPAddressList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
+ *out = *in
+ if in.ParentRef != nil {
+ in, out := &in.ParentRef, &out.ParentRef
+ *out = new(ParentReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
+func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAddressSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
@@ -711,6 +792,22 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParentReference) DeepCopyInto(out *ParentReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
+func (in *ParentReference) DeepCopy() *ParentReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ParentReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
*out = *in
@@ -726,3 +823,108 @@ func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
+func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCIDR)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ServiceCIDR, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
+func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCIDRList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
+ *out = *in
+ if in.CIDRs != nil {
+ in, out := &in.CIDRs, &out.CIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
+func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCIDRSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
+func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCIDRStatus)
+ in.DeepCopyInto(out)
+ return out
+}
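
A minimal sketch, assuming only the vendored k8s.io/api module above, of what the generated deep-copy buys: IPAddressSpec.DeepCopyInto allocates a fresh ParentReference rather than sharing the pointer, so mutating the copy leaves the original untouched.

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	orig := &networkingv1.IPAddress{
		Spec: networkingv1.IPAddressSpec{
			ParentRef: &networkingv1.ParentReference{Name: "kubernetes"},
		},
	}
	cp := orig.DeepCopy()
	cp.Spec.ParentRef.Name = "changed"

	// The original keeps its own ParentReference; prints "kubernetes".
	fmt.Println(orig.Spec.ParentRef.Name)
}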
diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
index 21e8c671a..6894d8c53 100644
--- a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
@@ -21,6 +21,18 @@ limitations under the License.
package v1
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *IPAddress) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
+
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *Ingress) APILifecycleIntroduced() (major, minor int) {
@@ -56,3 +68,15 @@ func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) {
func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) {
return 1, 19
}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 33
+}
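
A minimal sketch of consuming this lifecycle metadata, e.g. to decide whether a cluster release predates the type; the servedSince helper is illustrative and not part of the generated code.

package lifecycle

import networkingv1 "k8s.io/api/networking/v1"

// servedSince reports whether a cluster at (clusterMajor, clusterMinor) is
// new enough to serve the v1 IPAddress type, per APILifecycleIntroduced.
func servedSince(clusterMajor, clusterMinor int) bool {
	major, minor := (&networkingv1.IPAddress{}).APILifecycleIntroduced() // 1, 33
	return clusterMajor > major || (clusterMajor == major && clusterMinor >= minor)
}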
diff --git a/vendor/k8s.io/api/networking/v1alpha1/doc.go b/vendor/k8s.io/api/networking/v1alpha1/doc.go
index 3827b0418..55264ae70 100644
--- a/vendor/k8s.io/api/networking/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/networking/v1alpha1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=networking.k8s.io
-package v1alpha1 // import "k8s.io/api/networking/v1alpha1"
+package v1alpha1
diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/k8s.io/api/networking/v1beta1/doc.go
index fa6d01cea..c5a03e04e 100644
--- a/vendor/k8s.io/api/networking/v1beta1/doc.go
+++ b/vendor/k8s.io/api/networking/v1beta1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=networking.k8s.io
-package v1beta1 // import "k8s.io/api/networking/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/node/v1/doc.go b/vendor/k8s.io/api/node/v1/doc.go
index 57ca52445..3239af703 100644
--- a/vendor/k8s.io/api/node/v1/doc.go
+++ b/vendor/k8s.io/api/node/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=node.k8s.io
-package v1 // import "k8s.io/api/node/v1"
+package v1
diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go
index dfe99540b..2f3d46ac2 100644
--- a/vendor/k8s.io/api/node/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/node/v1alpha1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +groupName=node.k8s.io
-package v1alpha1 // import "k8s.io/api/node/v1alpha1"
+package v1alpha1
diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go
index c76ba89c4..7b47c8df6 100644
--- a/vendor/k8s.io/api/node/v1beta1/doc.go
+++ b/vendor/k8s.io/api/node/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=node.k8s.io
-package v1beta1 // import "k8s.io/api/node/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go
index c51e02685..ff47e7fd4 100644
--- a/vendor/k8s.io/api/policy/v1/doc.go
+++ b/vendor/k8s.io/api/policy/v1/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
// Package policy is for any kind of policy object. Suitable examples, even if
// they aren't all here, are PodDisruptionBudget,
// NetworkPolicy, etc.
-package v1 // import "k8s.io/api/policy/v1"
+package v1
diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto
index 57128e811..953489072 100644
--- a/vendor/k8s.io/api/policy/v1/generated.proto
+++ b/vendor/k8s.io/api/policy/v1/generated.proto
@@ -115,9 +115,6 @@ message PodDisruptionBudgetSpec {
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
- //
- // This field is beta-level. The eviction API uses this field when
- // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
// +optional
optional string unhealthyPodEvictionPolicy = 4;
}
diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go
index f05367ebe..4e7436789 100644
--- a/vendor/k8s.io/api/policy/v1/types.go
+++ b/vendor/k8s.io/api/policy/v1/types.go
@@ -70,9 +70,6 @@ type PodDisruptionBudgetSpec struct {
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
- //
- // This field is beta-level. The eviction API uses this field when
- // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
// +optional
UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"`
}
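
A minimal sketch of opting into the AlwaysAllow policy now that the field no longer sits behind the feature gate whose caveat is removed above; the PDB spec shown is skeletal.

package pdb

import policyv1 "k8s.io/api/policy/v1"

// alwaysAllowPDB returns a PodDisruptionBudget spec whose unhealthy pods
// may always be evicted, regardless of the disruption budget.
func alwaysAllowPDB() policyv1.PodDisruptionBudgetSpec {
	policy := policyv1.AlwaysAllow
	return policyv1.PodDisruptionBudgetSpec{
		UnhealthyPodEvictionPolicy: &policy,
	}
}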
diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
index 799b0794a..9b2f5b945 100644
--- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
@@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{
"minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".",
"selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.",
"maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".",
- "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).",
+	"unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means prospective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.",
}
func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go
index 76da54b4c..777106c60 100644
--- a/vendor/k8s.io/api/policy/v1beta1/doc.go
+++ b/vendor/k8s.io/api/policy/v1beta1/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
// Package policy is for any kind of policy object. Suitable examples, even if
// they aren't all here, are PodDisruptionBudget,
// NetworkPolicy, etc.
-package v1beta1 // import "k8s.io/api/policy/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto
index 91e33f233..e0cbe00f1 100644
--- a/vendor/k8s.io/api/policy/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto
@@ -115,9 +115,6 @@ message PodDisruptionBudgetSpec {
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
- //
- // This field is beta-level. The eviction API uses this field when
- // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
// +optional
optional string unhealthyPodEvictionPolicy = 4;
}
diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go
index bc5f970d2..9bba454f9 100644
--- a/vendor/k8s.io/api/policy/v1beta1/types.go
+++ b/vendor/k8s.io/api/policy/v1beta1/types.go
@@ -67,9 +67,6 @@ type PodDisruptionBudgetSpec struct {
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
- //
- // This field is beta-level. The eviction API uses this field when
- // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).
// +optional
UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"`
}
diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
index 4a79d7594..cffc9a548 100644
--- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
@@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{
"minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".",
"selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.",
"maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".",
- "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).",
+	"unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means prospective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.",
}
func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go
index b0e4e5b5b..408546274 100644
--- a/vendor/k8s.io/api/rbac/v1/doc.go
+++ b/vendor/k8s.io/api/rbac/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=rbac.authorization.k8s.io
-package v1 // import "k8s.io/api/rbac/v1"
+package v1
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go
index 918b8a337..70d3c0e97 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
// +groupName=rbac.authorization.k8s.io
-package v1alpha1 // import "k8s.io/api/rbac/v1alpha1"
+package v1alpha1
diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go
index 156f273e6..504a58d8b 100644
--- a/vendor/k8s.io/api/rbac/v1beta1/doc.go
+++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=rbac.authorization.k8s.io
-package v1beta1 // import "k8s.io/api/rbac/v1beta1"
+package v1beta1
diff --git a/vendor/k8s.io/api/resource/v1alpha3/doc.go b/vendor/k8s.io/api/resource/v1alpha3/doc.go
index ffc21307d..82e64f1d0 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/doc.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=resource.k8s.io
// Package v1alpha3 is the v1alpha3 version of the resource API.
-package v1alpha3 // import "k8s.io/api/resource/v1alpha3"
+package v1alpha3
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
index 540f7b818..716492fea 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
@@ -29,6 +29,7 @@ import (
v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
math "math"
math_bits "math/bits"
@@ -161,10 +162,66 @@ func (m *CELDeviceSelector) XXX_DiscardUnknown() {
var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
+func (m *Counter) Reset() { *m = Counter{} }
+func (*Counter) ProtoMessage() {}
+func (*Counter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{4}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Counter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Counter.Merge(m, src)
+}
+func (m *Counter) XXX_Size() int {
+ return m.Size()
+}
+func (m *Counter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
+
+func (m *CounterSet) Reset() { *m = CounterSet{} }
+func (*CounterSet) ProtoMessage() {}
+func (*CounterSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{5}
+}
+func (m *CounterSet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CounterSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CounterSet.Merge(m, src)
+}
+func (m *CounterSet) XXX_Size() int {
+ return m.Size()
+}
+func (m *CounterSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_CounterSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CounterSet proto.InternalMessageInfo
+
func (m *Device) Reset() { *m = Device{} }
func (*Device) ProtoMessage() {}
func (*Device) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{4}
+ return fileDescriptor_66649ee9bbcd89d2, []int{6}
}
func (m *Device) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -192,7 +249,7 @@ var xxx_messageInfo_Device proto.InternalMessageInfo
func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
func (*DeviceAllocationConfiguration) ProtoMessage() {}
func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{5}
+ return fileDescriptor_66649ee9bbcd89d2, []int{7}
}
func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -220,7 +277,7 @@ var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
func (*DeviceAllocationResult) ProtoMessage() {}
func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{6}
+ return fileDescriptor_66649ee9bbcd89d2, []int{8}
}
func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -248,7 +305,7 @@ var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
func (*DeviceAttribute) ProtoMessage() {}
func (*DeviceAttribute) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{7}
+ return fileDescriptor_66649ee9bbcd89d2, []int{9}
}
func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -276,7 +333,7 @@ var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
func (*DeviceClaim) ProtoMessage() {}
func (*DeviceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{8}
+ return fileDescriptor_66649ee9bbcd89d2, []int{10}
}
func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -304,7 +361,7 @@ var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
func (*DeviceClaimConfiguration) ProtoMessage() {}
func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{9}
+ return fileDescriptor_66649ee9bbcd89d2, []int{11}
}
func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -332,7 +389,7 @@ var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
func (m *DeviceClass) Reset() { *m = DeviceClass{} }
func (*DeviceClass) ProtoMessage() {}
func (*DeviceClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{10}
+ return fileDescriptor_66649ee9bbcd89d2, []int{12}
}
func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -360,7 +417,7 @@ var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
func (*DeviceClassConfiguration) ProtoMessage() {}
func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{11}
+ return fileDescriptor_66649ee9bbcd89d2, []int{13}
}
func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -388,7 +445,7 @@ var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
func (*DeviceClassList) ProtoMessage() {}
func (*DeviceClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{12}
+ return fileDescriptor_66649ee9bbcd89d2, []int{14}
}
func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -416,7 +473,7 @@ var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
func (*DeviceClassSpec) ProtoMessage() {}
func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{13}
+ return fileDescriptor_66649ee9bbcd89d2, []int{15}
}
func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -444,7 +501,7 @@ var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
func (*DeviceConfiguration) ProtoMessage() {}
func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{14}
+ return fileDescriptor_66649ee9bbcd89d2, []int{16}
}
func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -472,7 +529,7 @@ var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
func (*DeviceConstraint) ProtoMessage() {}
func (*DeviceConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{15}
+ return fileDescriptor_66649ee9bbcd89d2, []int{17}
}
func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -497,10 +554,38 @@ func (m *DeviceConstraint) XXX_DiscardUnknown() {
var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
+func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
+func (*DeviceCounterConsumption) ProtoMessage() {}
+func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{18}
+}
+func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceCounterConsumption.Merge(m, src)
+}
+func (m *DeviceCounterConsumption) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceCounterConsumption) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo
+
func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
func (*DeviceRequest) ProtoMessage() {}
func (*DeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{16}
+ return fileDescriptor_66649ee9bbcd89d2, []int{19}
}
func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -528,7 +613,7 @@ var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
func (*DeviceRequestAllocationResult) ProtoMessage() {}
func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{17}
+ return fileDescriptor_66649ee9bbcd89d2, []int{20}
}
func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -556,7 +641,7 @@ var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
func (*DeviceSelector) ProtoMessage() {}
func (*DeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{18}
+ return fileDescriptor_66649ee9bbcd89d2, []int{21}
}
func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -581,10 +666,206 @@ func (m *DeviceSelector) XXX_DiscardUnknown() {
var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
+func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
+func (*DeviceSubRequest) ProtoMessage() {}
+func (*DeviceSubRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{22}
+}
+func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceSubRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceSubRequest.Merge(m, src)
+}
+func (m *DeviceSubRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceSubRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo
+
+func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
+func (*DeviceTaint) ProtoMessage() {}
+func (*DeviceTaint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{23}
+}
+func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceTaint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaint.Merge(m, src)
+}
+func (m *DeviceTaint) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceTaint) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
+
+func (m *DeviceTaintRule) Reset() { *m = DeviceTaintRule{} }
+func (*DeviceTaintRule) ProtoMessage() {}
+func (*DeviceTaintRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{24}
+}
+func (m *DeviceTaintRule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceTaintRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceTaintRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintRule.Merge(m, src)
+}
+func (m *DeviceTaintRule) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceTaintRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTaintRule proto.InternalMessageInfo
+
+func (m *DeviceTaintRuleList) Reset() { *m = DeviceTaintRuleList{} }
+func (*DeviceTaintRuleList) ProtoMessage() {}
+func (*DeviceTaintRuleList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{25}
+}
+func (m *DeviceTaintRuleList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceTaintRuleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceTaintRuleList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintRuleList.Merge(m, src)
+}
+func (m *DeviceTaintRuleList) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceTaintRuleList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintRuleList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTaintRuleList proto.InternalMessageInfo
+
+func (m *DeviceTaintRuleSpec) Reset() { *m = DeviceTaintRuleSpec{} }
+func (*DeviceTaintRuleSpec) ProtoMessage() {}
+func (*DeviceTaintRuleSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{26}
+}
+func (m *DeviceTaintRuleSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceTaintRuleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceTaintRuleSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintRuleSpec.Merge(m, src)
+}
+func (m *DeviceTaintRuleSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceTaintRuleSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintRuleSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTaintRuleSpec proto.InternalMessageInfo
+
+func (m *DeviceTaintSelector) Reset() { *m = DeviceTaintSelector{} }
+func (*DeviceTaintSelector) ProtoMessage() {}
+func (*DeviceTaintSelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{27}
+}
+func (m *DeviceTaintSelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceTaintSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceTaintSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceTaintSelector.Merge(m, src)
+}
+func (m *DeviceTaintSelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceTaintSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceTaintSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTaintSelector proto.InternalMessageInfo
+
+func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
+func (*DeviceToleration) ProtoMessage() {}
+func (*DeviceToleration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{28}
+}
+func (m *DeviceToleration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceToleration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceToleration.Merge(m, src)
+}
+func (m *DeviceToleration) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceToleration) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceToleration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo
+
func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
func (*NetworkDeviceData) ProtoMessage() {}
func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{19}
+ return fileDescriptor_66649ee9bbcd89d2, []int{29}
}
func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -612,7 +893,7 @@ var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
func (*OpaqueDeviceConfiguration) ProtoMessage() {}
func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{20}
+ return fileDescriptor_66649ee9bbcd89d2, []int{30}
}
func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -640,7 +921,7 @@ var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
func (*ResourceClaim) ProtoMessage() {}
func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{21}
+ return fileDescriptor_66649ee9bbcd89d2, []int{31}
}
func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -668,7 +949,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
func (*ResourceClaimConsumerReference) ProtoMessage() {}
func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{22}
+ return fileDescriptor_66649ee9bbcd89d2, []int{32}
}
func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -696,7 +977,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
func (*ResourceClaimList) ProtoMessage() {}
func (*ResourceClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{23}
+ return fileDescriptor_66649ee9bbcd89d2, []int{33}
}
func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -724,7 +1005,7 @@ var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
func (*ResourceClaimSpec) ProtoMessage() {}
func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{24}
+ return fileDescriptor_66649ee9bbcd89d2, []int{34}
}
func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -752,7 +1033,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
func (*ResourceClaimStatus) ProtoMessage() {}
func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{25}
+ return fileDescriptor_66649ee9bbcd89d2, []int{35}
}
func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -780,7 +1061,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
func (*ResourceClaimTemplate) ProtoMessage() {}
func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{26}
+ return fileDescriptor_66649ee9bbcd89d2, []int{36}
}
func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -808,7 +1089,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
func (*ResourceClaimTemplateList) ProtoMessage() {}
func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{27}
+ return fileDescriptor_66649ee9bbcd89d2, []int{37}
}
func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -836,7 +1117,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
func (*ResourceClaimTemplateSpec) ProtoMessage() {}
func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{28}
+ return fileDescriptor_66649ee9bbcd89d2, []int{38}
}
func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -864,7 +1145,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
func (m *ResourcePool) Reset() { *m = ResourcePool{} }
func (*ResourcePool) ProtoMessage() {}
func (*ResourcePool) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{29}
+ return fileDescriptor_66649ee9bbcd89d2, []int{39}
}
func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -892,7 +1173,7 @@ var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
func (*ResourceSlice) ProtoMessage() {}
func (*ResourceSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{30}
+ return fileDescriptor_66649ee9bbcd89d2, []int{40}
}
func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -920,7 +1201,7 @@ var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
func (*ResourceSliceList) ProtoMessage() {}
func (*ResourceSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{31}
+ return fileDescriptor_66649ee9bbcd89d2, []int{41}
}
func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -948,7 +1229,7 @@ var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
func (*ResourceSliceSpec) ProtoMessage() {}
func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{32}
+ return fileDescriptor_66649ee9bbcd89d2, []int{42}
}
func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -980,6 +1261,9 @@ func init() {
proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry")
proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry")
proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector")
+ proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1alpha3.Counter")
+ proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1alpha3.CounterSet")
+ proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1alpha3.CounterSet.CountersEntry")
proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device")
proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration")
proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult")
@@ -992,9 +1276,18 @@ func init() {
proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec")
proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration")
proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint")
+ proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1alpha3.DeviceCounterConsumption")
+ proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1alpha3.DeviceCounterConsumption.CountersEntry")
proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest")
proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult")
proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector")
+ proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceSubRequest")
+ proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaint")
+ proto.RegisterType((*DeviceTaintRule)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRule")
+ proto.RegisterType((*DeviceTaintRuleList)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleList")
+ proto.RegisterType((*DeviceTaintRuleSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleSpec")
+ proto.RegisterType((*DeviceTaintSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintSelector")
+ proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1alpha3.DeviceToleration")
proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1alpha3.NetworkDeviceData")
proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration")
proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim")
@@ -1016,134 +1309,172 @@ func init() {
}
var fileDescriptor_66649ee9bbcd89d2 = []byte{
- // 2030 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0xcd, 0x6f, 0x1c, 0x57,
- 0xdd, 0xb3, 0xe3, 0xcf, 0xdf, 0xfa, 0x2b, 0x2f, 0xa4, 0x38, 0xa6, 0xec, 0x3a, 0x53, 0x04, 0x4e,
- 0x9b, 0xee, 0x36, 0x4e, 0xd5, 0x16, 0xc2, 0x01, 0x8f, 0xed, 0x06, 0x47, 0x89, 0xe3, 0x3c, 0xb7,
- 0x11, 0x81, 0x12, 0x78, 0x9e, 0x7d, 0xb6, 0x07, 0xcf, 0xce, 0x4c, 0xe7, 0xbd, 0x71, 0xea, 0x0b,
- 0xaa, 0xe0, 0x1e, 0xf1, 0x0f, 0x20, 0x0e, 0x48, 0x48, 0x5c, 0x80, 0xff, 0x00, 0x24, 0x90, 0x88,
- 0xe0, 0x12, 0x09, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xce, 0xdc, 0x73, 0x42, 0xef, 0xcd, 0x9b, 0xcf,
- 0xdd, 0x71, 0xc6, 0x55, 0xb1, 0xd2, 0xdb, 0xce, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xb8, 0x72,
- 0xf8, 0x0e, 0x6b, 0xd9, 0x5e, 0x9b, 0xf8, 0x76, 0x3b, 0xa0, 0xcc, 0x0b, 0x03, 0x8b, 0xb6, 0x8f,
- 0xae, 0x12, 0xc7, 0x3f, 0x20, 0xd7, 0xda, 0xfb, 0xd4, 0xa5, 0x01, 0xe1, 0xb4, 0xd3, 0xf2, 0x03,
- 0x8f, 0x7b, 0xe8, 0xe5, 0x88, 0xba, 0x45, 0x7c, 0xbb, 0x15, 0x53, 0xb7, 0x62, 0xea, 0xc5, 0xd7,
- 0xf7, 0x6d, 0x7e, 0x10, 0xee, 0xb6, 0x2c, 0xaf, 0xdb, 0xde, 0xf7, 0xf6, 0xbd, 0xb6, 0x64, 0xda,
- 0x0d, 0xf7, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x22, 0x61, 0x8b, 0x46, 0x46, 0xb5, 0xe5, 0x05, 0x42,
- 0x6d, 0x51, 0xe1, 0xe2, 0x9b, 0x29, 0x4d, 0x97, 0x58, 0x07, 0xb6, 0x4b, 0x83, 0xe3, 0xb6, 0x7f,
- 0xb8, 0x9f, 0xb7, 0xf7, 0x34, 0x5c, 0xac, 0xdd, 0xa5, 0x9c, 0x0c, 0xd3, 0xd5, 0x2e, 0xe3, 0x0a,
- 0x42, 0x97, 0xdb, 0xdd, 0x41, 0x35, 0x6f, 0x3d, 0x8f, 0x81, 0x59, 0x07, 0xb4, 0x4b, 0x8a, 0x7c,
- 0xc6, 0xaf, 0x75, 0xb8, 0xb0, 0xea, 0x38, 0x9e, 0x25, 0x60, 0xeb, 0xf4, 0xc8, 0xb6, 0xe8, 0x0e,
- 0x27, 0x3c, 0x64, 0xe8, 0xeb, 0x30, 0xde, 0x09, 0xec, 0x23, 0x1a, 0x2c, 0x68, 0x4b, 0xda, 0xf2,
- 0x94, 0x39, 0xfb, 0xb8, 0xd7, 0x1c, 0xe9, 0xf7, 0x9a, 0xe3, 0xeb, 0x12, 0x8a, 0x15, 0x16, 0x2d,
- 0xc1, 0xa8, 0xef, 0x79, 0xce, 0x42, 0x4d, 0x52, 0x4d, 0x2b, 0xaa, 0xd1, 0x6d, 0xcf, 0x73, 0xb0,
- 0xc4, 0x48, 0x49, 0x52, 0xf2, 0x82, 0x5e, 0x90, 0x24, 0xa1, 0x58, 0x61, 0x91, 0x05, 0x60, 0x79,
- 0x6e, 0xc7, 0xe6, 0xb6, 0xe7, 0xb2, 0x85, 0xd1, 0x25, 0x7d, 0xb9, 0xbe, 0xd2, 0x6e, 0xa5, 0x61,
- 0x4e, 0x0e, 0xd6, 0xf2, 0x0f, 0xf7, 0x05, 0x80, 0xb5, 0x84, 0xff, 0x5a, 0x47, 0x57, 0x5b, 0x6b,
- 0x31, 0x9f, 0x89, 0x94, 0x70, 0x48, 0x40, 0x0c, 0x67, 0xc4, 0xa2, 0x3b, 0x30, 0xda, 0x21, 0x9c,
- 0x2c, 0x8c, 0x2d, 0x69, 0xcb, 0xf5, 0x95, 0xd7, 0x4b, 0xc5, 0x2b, 0xbf, 0xb5, 0x30, 0x79, 0xb8,
- 0xf1, 0x11, 0xa7, 0x2e, 0x13, 0xc2, 0x93, 0xd3, 0xad, 0x13, 0x4e, 0xb0, 0x14, 0x84, 0x76, 0xa1,
- 0xee, 0x52, 0xfe, 0xd0, 0x0b, 0x0e, 0x05, 0x70, 0x61, 0x5c, 0xca, 0xcd, 0x9a, 0x3d, 0x98, 0x9d,
- 0xad, 0x2d, 0xc5, 0x20, 0xcf, 0x2d, 0xd8, 0xcc, 0xb9, 0x7e, 0xaf, 0x59, 0xdf, 0x4a, 0xe5, 0xe0,
- 0xac, 0x50, 0xe3, 0xef, 0x1a, 0xcc, 0xab, 0x28, 0xd9, 0x9e, 0x8b, 0x29, 0x0b, 0x1d, 0x8e, 0x7e,
- 0x04, 0x13, 0x91, 0xe3, 0x98, 0x8c, 0x50, 0x7d, 0xe5, 0xcd, 0x93, 0x95, 0x46, 0xda, 0x8a, 0x62,
- 0xcc, 0x39, 0x75, 0xa6, 0x89, 0x08, 0xcf, 0x70, 0x2c, 0x15, 0xdd, 0x83, 0x69, 0xd7, 0xeb, 0xd0,
- 0x1d, 0xea, 0x50, 0x8b, 0x7b, 0x81, 0x8c, 0x5e, 0x7d, 0x65, 0x29, 0xab, 0x45, 0xd4, 0x8a, 0xf0,
- 0xff, 0x56, 0x86, 0xce, 0x9c, 0xef, 0xf7, 0x9a, 0xd3, 0x59, 0x08, 0xce, 0xc9, 0x31, 0x3e, 0xd5,
- 0xa1, 0x6e, 0x12, 0x66, 0x5b, 0x91, 0x46, 0xf4, 0x53, 0x00, 0xc2, 0x79, 0x60, 0xef, 0x86, 0x5c,
- 0x9e, 0x45, 0xc4, 0xfd, 0x9b, 0x27, 0x9f, 0x25, 0xc3, 0xde, 0x5a, 0x4d, 0x78, 0x37, 0x5c, 0x1e,
- 0x1c, 0x9b, 0xaf, 0xc4, 0x19, 0x90, 0x22, 0x7e, 0xf6, 0xaf, 0xe6, 0xcc, 0xdd, 0x90, 0x38, 0xf6,
- 0x9e, 0x4d, 0x3b, 0x5b, 0xa4, 0x4b, 0x71, 0x46, 0x23, 0x3a, 0x82, 0x49, 0x8b, 0xf8, 0xc4, 0xb2,
- 0xf9, 0xf1, 0x42, 0x4d, 0x6a, 0x7f, 0xbb, 0xba, 0xf6, 0x35, 0xc5, 0x19, 0xe9, 0xbe, 0xa4, 0x74,
- 0x4f, 0xc6, 0xe0, 0x41, 0xcd, 0x89, 0xae, 0x45, 0x07, 0xe6, 0x0a, 0xb6, 0xa3, 0x79, 0xd0, 0x0f,
- 0xe9, 0x71, 0x54, 0x71, 0x58, 0xfc, 0x44, 0x6b, 0x30, 0x76, 0x44, 0x9c, 0x90, 0xca, 0xfa, 0xca,
- 0x27, 0x6c, 0x79, 0x8c, 0x63, 0xa9, 0x38, 0xe2, 0xfd, 0x56, 0xed, 0x1d, 0x6d, 0xf1, 0x10, 0x66,
- 0x72, 0xb6, 0x0e, 0xd1, 0xb5, 0x9e, 0xd7, 0xd5, 0x3a, 0xa9, 0xf6, 0x52, 0xe5, 0x77, 0x43, 0xe2,
- 0x72, 0x9b, 0x1f, 0x67, 0x94, 0x19, 0x37, 0xe0, 0xdc, 0xda, 0xc6, 0x2d, 0xd5, 0x4f, 0x54, 0xdc,
- 0xd1, 0x0a, 0x00, 0xfd, 0xc8, 0x0f, 0x28, 0x13, 0xb5, 0xa4, 0xba, 0x4a, 0x52, 0xae, 0x1b, 0x09,
- 0x06, 0x67, 0xa8, 0x8c, 0x23, 0x50, 0x5d, 0x42, 0xf4, 0x19, 0x97, 0x74, 0xa9, 0xe2, 0x4b, 0x2a,
- 0x51, 0xfa, 0x54, 0x62, 0xd0, 0x4d, 0x18, 0xdb, 0x15, 0x91, 0x51, 0xe6, 0x5f, 0xae, 0x1c, 0x44,
- 0x73, 0xaa, 0xdf, 0x6b, 0x8e, 0x49, 0x00, 0x8e, 0x44, 0x18, 0x8f, 0x6a, 0xf0, 0xd5, 0x62, 0xc1,
- 0xac, 0x79, 0xee, 0x9e, 0xbd, 0x1f, 0x06, 0xf2, 0x03, 0x7d, 0x07, 0xc6, 0x23, 0x91, 0xca, 0xa2,
- 0xe5, 0xb8, 0xab, 0xed, 0x48, 0xe8, 0xb3, 0x5e, 0xf3, 0xa5, 0x22, 0x6b, 0x84, 0xc1, 0x8a, 0x0f,
- 0x2d, 0xc3, 0x64, 0x40, 0x3f, 0x0c, 0x29, 0xe3, 0x4c, 0xe6, 0xdd, 0x94, 0x39, 0x2d, 0x52, 0x07,
- 0x2b, 0x18, 0x4e, 0xb0, 0xe8, 0x63, 0x0d, 0xce, 0x47, 0x55, 0x99, 0xb3, 0x41, 0x55, 0xe4, 0xd5,
- 0x2a, 0x39, 0x91, 0x63, 0x34, 0xbf, 0xa2, 0x8c, 0x3d, 0x3f, 0x04, 0x89, 0x87, 0xa9, 0x32, 0xfe,
- 0xa3, 0xc1, 0x4b, 0xc3, 0x3b, 0x08, 0xda, 0x83, 0x89, 0x40, 0xfe, 0x8a, 0x8b, 0xf7, 0x7a, 0x15,
- 0x83, 0xd4, 0x31, 0xcb, 0xfb, 0x51, 0xf4, 0xcd, 0x70, 0x2c, 0x1c, 0x59, 0x30, 0x6e, 0x49, 0x9b,
- 0x54, 0x95, 0x5e, 0x3f, 0x5d, 0xbf, 0xcb, 0x7b, 0x20, 0x19, 0x42, 0x11, 0x18, 0x2b, 0xd1, 0xc6,
- 0x6f, 0x35, 0x98, 0x2b, 0x54, 0x11, 0x6a, 0x80, 0x6e, 0xbb, 0x5c, 0xa6, 0x95, 0x1e, 0xc5, 0x68,
- 0xd3, 0xe5, 0xf7, 0x44, 0xb2, 0x63, 0x81, 0x40, 0x97, 0x60, 0x74, 0x57, 0x8c, 0x40, 0x11, 0x8e,
- 0x49, 0x73, 0xa6, 0xdf, 0x6b, 0x4e, 0x99, 0x9e, 0xe7, 0x44, 0x14, 0x12, 0x85, 0xbe, 0x01, 0xe3,
- 0x8c, 0x07, 0xb6, 0xbb, 0xbf, 0x30, 0x2a, 0xb3, 0x45, 0xf6, 0xfb, 0x1d, 0x09, 0x89, 0xc8, 0x14,
- 0x1a, 0xbd, 0x0a, 0x13, 0x47, 0x34, 0x90, 0x15, 0x32, 0x26, 0x29, 0x65, 0x37, 0xbd, 0x17, 0x81,
- 0x22, 0xd2, 0x98, 0xc0, 0xf8, 0x7d, 0x0d, 0xea, 0x2a, 0x80, 0x0e, 0xb1, 0xbb, 0xe8, 0x7e, 0x26,
- 0xa1, 0xa2, 0x48, 0xbc, 0x76, 0x8a, 0x48, 0x98, 0xf3, 0x71, 0xf3, 0x1a, 0x92, 0x81, 0x14, 0xea,
- 0x96, 0xe7, 0x32, 0x1e, 0x10, 0xdb, 0x55, 0xe9, 0x9a, 0x6f, 0x10, 0x27, 0x25, 0x9e, 0x62, 0x33,
- 0xcf, 0x2b, 0x05, 0xf5, 0x14, 0xc6, 0x70, 0x56, 0x2e, 0x7a, 0x90, 0x84, 0x58, 0x97, 0x1a, 0xde,
- 0xaa, 0xa4, 0x41, 0x1c, 0xbe, 0x5a, 0x74, 0xff, 0xaa, 0xc1, 0x42, 0x19, 0x53, 0xae, 0x1e, 0xb5,
- 0xcf, 0x54, 0x8f, 0xb5, 0xb3, 0xab, 0xc7, 0x3f, 0x69, 0x99, 0xd8, 0x33, 0x86, 0x7e, 0x0c, 0x93,
- 0x62, 0x19, 0x92, 0xbb, 0x4d, 0xb4, 0x0e, 0xbc, 0x51, 0x6d, 0x75, 0xba, 0xb3, 0xfb, 0x13, 0x6a,
- 0xf1, 0xdb, 0x94, 0x93, 0xb4, 0x19, 0xa7, 0x30, 0x9c, 0x48, 0x15, 0x9b, 0x13, 0xf3, 0xa9, 0x75,
- 0x9a, 0x41, 0x24, 0x4d, 0xdb, 0xf1, 0xa9, 0x95, 0xf6, 0x6b, 0xf1, 0x85, 0xa5, 0x20, 0xe3, 0x97,
- 0xd9, 0x60, 0x30, 0x96, 0x0f, 0x46, 0x99, 0x8b, 0xb5, 0xb3, 0x73, 0xf1, 0x1f, 0x93, 0x56, 0x20,
- 0xed, 0xbb, 0x65, 0x33, 0x8e, 0x3e, 0x18, 0x70, 0x73, 0xab, 0x9a, 0x9b, 0x05, 0xb7, 0x74, 0x72,
- 0x52, 0x65, 0x31, 0x24, 0xe3, 0xe2, 0x2d, 0x18, 0xb3, 0x39, 0xed, 0xc6, 0xf5, 0x75, 0xb9, 0xb2,
- 0x8f, 0xcd, 0x19, 0x25, 0x75, 0x6c, 0x53, 0xf0, 0xe3, 0x48, 0x8c, 0xf1, 0x24, 0x7f, 0x02, 0xe1,
- 0x7b, 0xf4, 0x43, 0x98, 0x62, 0x6a, 0x22, 0xc7, 0x5d, 0xe2, 0x4a, 0x15, 0x3d, 0xc9, 0x7a, 0x77,
- 0x4e, 0xa9, 0x9a, 0x8a, 0x21, 0x0c, 0xa7, 0x12, 0x33, 0x15, 0x5c, 0x3b, 0x55, 0x05, 0x17, 0xe2,
- 0x5f, 0x5a, 0xc1, 0x01, 0x0c, 0x0b, 0x20, 0xfa, 0x01, 0x8c, 0x7b, 0x3e, 0xf9, 0x30, 0xa4, 0x2a,
- 0x2a, 0xcf, 0xd9, 0xe0, 0xee, 0x48, 0xda, 0x61, 0x69, 0x02, 0x42, 0x67, 0x84, 0xc6, 0x4a, 0xa4,
- 0xf1, 0x48, 0x83, 0xf9, 0x62, 0x33, 0x3b, 0x45, 0xb7, 0xd8, 0x86, 0xd9, 0x2e, 0xe1, 0xd6, 0x41,
- 0x32, 0x50, 0xd4, 0x5d, 0x69, 0xb9, 0xdf, 0x6b, 0xce, 0xde, 0xce, 0x61, 0x9e, 0xf5, 0x9a, 0xe8,
- 0xdd, 0xd0, 0x71, 0x8e, 0xf3, 0x3b, 0x63, 0x81, 0xdf, 0xf8, 0xb9, 0x0e, 0x33, 0xb9, 0xde, 0x5d,
- 0x61, 0x3b, 0x5a, 0x85, 0xb9, 0x4e, 0xea, 0x6c, 0x81, 0x50, 0x66, 0x7c, 0x59, 0x11, 0x67, 0x33,
- 0x45, 0xf2, 0x15, 0xe9, 0xf3, 0xa9, 0xa3, 0x7f, 0xee, 0xa9, 0x73, 0x0f, 0x66, 0x49, 0x32, 0xad,
- 0x6f, 0x7b, 0x1d, 0xaa, 0x66, 0x65, 0x4b, 0x71, 0xcd, 0xae, 0xe6, 0xb0, 0xcf, 0x7a, 0xcd, 0x2f,
- 0x15, 0x67, 0xbc, 0x80, 0xe3, 0x82, 0x14, 0xf4, 0x0a, 0x8c, 0x59, 0x5e, 0xe8, 0x72, 0x39, 0x50,
- 0xf5, 0xb4, 0x54, 0xd6, 0x04, 0x10, 0x47, 0x38, 0x74, 0x15, 0xea, 0xa4, 0xd3, 0xb5, 0xdd, 0x55,
- 0xcb, 0xa2, 0x8c, 0xc9, 0x6b, 0xdc, 0x64, 0x34, 0xa5, 0x57, 0x53, 0x30, 0xce, 0xd2, 0x18, 0xff,
- 0xd5, 0xe2, 0x1d, 0xb1, 0x64, 0x97, 0x41, 0x97, 0xc5, 0x66, 0x24, 0x51, 0x2a, 0x30, 0x99, 0xe5,
- 0x46, 0x82, 0x71, 0x8c, 0xcf, 0x5c, 0xb7, 0x6b, 0x95, 0xae, 0xdb, 0x7a, 0x85, 0xeb, 0xf6, 0xe8,
- 0x89, 0xd7, 0xed, 0xc2, 0x89, 0xc7, 0x2a, 0x9c, 0xf8, 0x03, 0x98, 0x2d, 0xec, 0xf4, 0x37, 0x41,
- 0xb7, 0xa8, 0xa3, 0x8a, 0xee, 0x39, 0xb7, 0xde, 0x81, 0x1b, 0x81, 0x39, 0xd1, 0xef, 0x35, 0xf5,
- 0xb5, 0x8d, 0x5b, 0x58, 0x08, 0x31, 0x7e, 0xa7, 0xc1, 0xb9, 0x81, 0x9b, 0x31, 0xba, 0x0e, 0x33,
- 0xb6, 0xcb, 0x69, 0xb0, 0x47, 0x2c, 0xba, 0x95, 0xa6, 0xf8, 0x05, 0x75, 0xaa, 0x99, 0xcd, 0x2c,
- 0x12, 0xe7, 0x69, 0xd1, 0x45, 0xd0, 0x6d, 0x3f, 0xde, 0xae, 0xa5, 0xb6, 0xcd, 0x6d, 0x86, 0x05,
- 0x4c, 0xd4, 0xc3, 0x01, 0x09, 0x3a, 0x0f, 0x49, 0x40, 0x57, 0x3b, 0x1d, 0x71, 0xdf, 0x50, 0x3e,
- 0x4d, 0xea, 0xe1, 0xbb, 0x79, 0x34, 0x2e, 0xd2, 0x1b, 0xbf, 0xd1, 0xe0, 0x62, 0x69, 0x27, 0xa9,
- 0xfc, 0x80, 0x42, 0x00, 0x7c, 0x12, 0x90, 0x2e, 0xe5, 0x34, 0x60, 0x43, 0xa6, 0x6b, 0x85, 0x77,
- 0x89, 0x64, 0x70, 0x6f, 0x27, 0x82, 0x70, 0x46, 0xa8, 0xf1, 0xab, 0x1a, 0xcc, 0x60, 0x15, 0x8f,
- 0x68, 0x55, 0xfc, 0xff, 0xaf, 0x0b, 0x77, 0x73, 0xeb, 0xc2, 0x73, 0x52, 0x23, 0x67, 0x5c, 0xd9,
- 0xc2, 0x80, 0xee, 0x8b, 0x25, 0x9a, 0xf0, 0x90, 0x55, 0xbb, 0xf8, 0xe4, 0x85, 0x4a, 0xc6, 0x34,
- 0x08, 0xd1, 0x37, 0x56, 0x02, 0x8d, 0xbe, 0x06, 0x8d, 0x1c, 0xbd, 0xe8, 0xf4, 0x61, 0x97, 0x06,
- 0x98, 0xee, 0xd1, 0x80, 0xba, 0x16, 0x45, 0x57, 0x60, 0x92, 0xf8, 0xf6, 0x8d, 0xc0, 0x0b, 0x7d,
- 0x15, 0xd1, 0x64, 0x94, 0xaf, 0x6e, 0x6f, 0x4a, 0x38, 0x4e, 0x28, 0x04, 0x75, 0x6c, 0x91, 0xca,
- 0xab, 0xcc, 0x7a, 0x1d, 0xc1, 0x71, 0x42, 0x91, 0xb4, 0xef, 0xd1, 0xd2, 0xf6, 0x6d, 0x82, 0x1e,
- 0xda, 0x1d, 0x75, 0x27, 0x78, 0x43, 0x11, 0xe8, 0xef, 0x6f, 0xae, 0x3f, 0xeb, 0x35, 0x2f, 0x95,
- 0x3d, 0xfe, 0xf1, 0x63, 0x9f, 0xb2, 0xd6, 0xfb, 0x9b, 0xeb, 0x58, 0x30, 0x1b, 0x7f, 0xd6, 0xe0,
- 0x5c, 0xee, 0x90, 0x67, 0xb0, 0xd2, 0x6c, 0xe7, 0x57, 0x9a, 0xd7, 0x4e, 0x11, 0xb2, 0x92, 0xa5,
- 0xc6, 0x2e, 0x1c, 0x42, 0x6e, 0x35, 0xef, 0x15, 0x1f, 0xc3, 0x2e, 0x57, 0xbe, 0x39, 0x94, 0xbf,
- 0x80, 0x19, 0x7f, 0xab, 0xc1, 0xf9, 0x21, 0x59, 0x84, 0x1e, 0x00, 0xa4, 0x33, 0x66, 0x88, 0xd3,
- 0x86, 0x28, 0x1c, 0xb8, 0xe7, 0xce, 0xca, 0x27, 0xaa, 0x14, 0x9a, 0x91, 0x88, 0x18, 0xd4, 0x03,
- 0xca, 0x68, 0x70, 0x44, 0x3b, 0xef, 0x7a, 0x81, 0x72, 0xdd, 0xb7, 0x4f, 0xe1, 0xba, 0x81, 0xec,
- 0x4d, 0xef, 0x5e, 0x38, 0x15, 0x8c, 0xb3, 0x5a, 0xd0, 0x83, 0xd4, 0x85, 0xd1, 0xdb, 0xeb, 0xb5,
- 0x4a, 0x27, 0xca, 0x3f, 0x1b, 0x9f, 0xe0, 0xcc, 0x7f, 0x6a, 0x70, 0x21, 0x67, 0xe4, 0x7b, 0xb4,
- 0xeb, 0x3b, 0x84, 0xd3, 0x33, 0x68, 0x46, 0xf7, 0x73, 0xcd, 0xe8, 0xed, 0x53, 0x78, 0x32, 0x36,
- 0xb2, 0xf4, 0x16, 0xf3, 0x0f, 0x0d, 0x2e, 0x0e, 0xe5, 0x38, 0x83, 0xe2, 0xfa, 0x5e, 0xbe, 0xb8,
- 0xae, 0x7d, 0x86, 0x73, 0x95, 0xdf, 0x1c, 0x2e, 0x96, 0xfa, 0xe1, 0x0b, 0x39, 0x3d, 0x8c, 0x3f,
- 0x68, 0x30, 0x1d, 0x53, 0x8a, 0x75, 0xa9, 0xc2, 0xce, 0xbc, 0x02, 0xa0, 0xfe, 0x30, 0x89, 0x6f,
- 0xf7, 0x7a, 0x6a, 0xf7, 0x8d, 0x04, 0x83, 0x33, 0x54, 0xe8, 0x26, 0xa0, 0xd8, 0xc2, 0x1d, 0x47,
- 0x2e, 0x05, 0x62, 0xf5, 0xd4, 0x25, 0xef, 0xa2, 0xe2, 0x45, 0x78, 0x80, 0x02, 0x0f, 0xe1, 0x32,
- 0xfe, 0xa2, 0xa5, 0x73, 0x5b, 0x82, 0x5f, 0x54, 0xcf, 0x4b, 0xe3, 0x4a, 0x3d, 0x9f, 0x9d, 0x3b,
- 0x92, 0xf2, 0x85, 0x9d, 0x3b, 0xd2, 0xba, 0x92, 0x92, 0x78, 0xa4, 0x17, 0x4e, 0x21, 0x4b, 0xa1,
- 0xea, 0x96, 0x77, 0x2b, 0xf3, 0x37, 0x59, 0x7d, 0xe5, 0xd5, 0x6a, 0xe6, 0x88, 0x34, 0x1d, 0xba,
- 0xe3, 0x5f, 0x81, 0x49, 0xd7, 0xeb, 0x44, 0xfb, 0x70, 0x61, 0xbb, 0xd8, 0x52, 0x70, 0x9c, 0x50,
- 0x0c, 0xfc, 0x91, 0x33, 0xfa, 0xf9, 0xfc, 0x91, 0x23, 0x37, 0x22, 0xc7, 0x11, 0x04, 0xf1, 0xf5,
- 0x21, 0xdd, 0x88, 0x14, 0x1c, 0x27, 0x14, 0xe8, 0x4e, 0x3a, 0x5f, 0xc6, 0x65, 0x4c, 0xbe, 0x56,
- 0x65, 0x44, 0x97, 0x0f, 0x14, 0xd3, 0x7c, 0xfc, 0xb4, 0x31, 0xf2, 0xe4, 0x69, 0x63, 0xe4, 0x93,
- 0xa7, 0x8d, 0x91, 0x8f, 0xfb, 0x0d, 0xed, 0x71, 0xbf, 0xa1, 0x3d, 0xe9, 0x37, 0xb4, 0x4f, 0xfa,
- 0x0d, 0xed, 0xd3, 0x7e, 0x43, 0xfb, 0xc5, 0xbf, 0x1b, 0x23, 0xdf, 0x7f, 0xf9, 0xa4, 0x7f, 0x95,
- 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x60, 0x85, 0x64, 0x74, 0x1e, 0x00, 0x00,
+ // 2635 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x5b, 0x6f, 0x1c, 0x57,
+ 0x39, 0xb3, 0xbb, 0x5e, 0xaf, 0xbf, 0x8d, 0x1d, 0xfb, 0x84, 0x84, 0x8d, 0x49, 0x77, 0x93, 0x09,
+ 0x17, 0xa7, 0x75, 0xd6, 0x8d, 0x53, 0xb5, 0x85, 0x80, 0x84, 0xd7, 0x76, 0x52, 0xa7, 0x89, 0xe3,
+ 0x9c, 0x75, 0x03, 0x81, 0x12, 0x18, 0xcf, 0x1e, 0xdb, 0x83, 0x67, 0x67, 0xa6, 0x73, 0x66, 0x9d,
+ 0x5a, 0x42, 0xa8, 0xe2, 0x07, 0x54, 0xbc, 0xf2, 0x80, 0x2a, 0xf1, 0x50, 0x89, 0x17, 0xe0, 0x99,
+ 0x17, 0x90, 0x40, 0x6a, 0x04, 0x3c, 0x44, 0xa2, 0x42, 0x15, 0x12, 0x0b, 0x59, 0x84, 0xf8, 0x0b,
+ 0xc8, 0x4f, 0xe8, 0x5c, 0xe6, 0xba, 0x3b, 0xce, 0xac, 0x49, 0xac, 0x20, 0xf5, 0x6d, 0xf7, 0x3b,
+ 0xdf, 0xed, 0x7c, 0xf7, 0x73, 0xe6, 0xc0, 0xec, 0xce, 0xeb, 0xb4, 0x6e, 0xd8, 0x73, 0x9a, 0x63,
+ 0xcc, 0xb9, 0x84, 0xda, 0x1d, 0x57, 0x27, 0x73, 0xbb, 0x97, 0x35, 0xd3, 0xd9, 0xd6, 0xae, 0xcc,
+ 0x6d, 0x11, 0x8b, 0xb8, 0x9a, 0x47, 0x5a, 0x75, 0xc7, 0xb5, 0x3d, 0x1b, 0x9d, 0x15, 0xd8, 0x75,
+ 0xcd, 0x31, 0xea, 0x3e, 0x76, 0xdd, 0xc7, 0x9e, 0xbe, 0xb4, 0x65, 0x78, 0xdb, 0x9d, 0x8d, 0xba,
+ 0x6e, 0xb7, 0xe7, 0xb6, 0xec, 0x2d, 0x7b, 0x8e, 0x13, 0x6d, 0x74, 0x36, 0xf9, 0x3f, 0xfe, 0x87,
+ 0xff, 0x12, 0xcc, 0xa6, 0xd5, 0x88, 0x68, 0xdd, 0x76, 0x99, 0xd8, 0xa4, 0xc0, 0xe9, 0x57, 0x42,
+ 0x9c, 0xb6, 0xa6, 0x6f, 0x1b, 0x16, 0x71, 0xf7, 0xe6, 0x9c, 0x9d, 0xad, 0xb8, 0xbe, 0xc3, 0x50,
+ 0xd1, 0xb9, 0x36, 0xf1, 0xb4, 0x41, 0xb2, 0xe6, 0xd2, 0xa8, 0xdc, 0x8e, 0xe5, 0x19, 0xed, 0x7e,
+ 0x31, 0xaf, 0x3e, 0x89, 0x80, 0xea, 0xdb, 0xa4, 0xad, 0x25, 0xe9, 0xd4, 0x0f, 0xf2, 0x70, 0x6a,
+ 0xc1, 0x34, 0x6d, 0x9d, 0xc1, 0x96, 0xc8, 0xae, 0xa1, 0x93, 0xa6, 0xa7, 0x79, 0x1d, 0x8a, 0xbe,
+ 0x08, 0xc5, 0x96, 0x6b, 0xec, 0x12, 0xb7, 0xa2, 0x9c, 0x53, 0x66, 0xc6, 0x1a, 0x13, 0x0f, 0xbb,
+ 0xb5, 0x63, 0xbd, 0x6e, 0xad, 0xb8, 0xc4, 0xa1, 0x58, 0xae, 0xa2, 0x73, 0x50, 0x70, 0x6c, 0xdb,
+ 0xac, 0xe4, 0x38, 0xd6, 0x71, 0x89, 0x55, 0x58, 0xb3, 0x6d, 0x13, 0xf3, 0x15, 0xce, 0x89, 0x73,
+ 0xae, 0xe4, 0x13, 0x9c, 0x38, 0x14, 0xcb, 0x55, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x32, 0x3c, 0xc3,
+ 0xb6, 0x68, 0xa5, 0x70, 0x2e, 0x3f, 0x53, 0x9e, 0x9f, 0xab, 0x87, 0x6e, 0x0e, 0x36, 0x56, 0x77,
+ 0x76, 0xb6, 0x18, 0x80, 0xd6, 0x99, 0xfd, 0xea, 0xbb, 0x97, 0xeb, 0x8b, 0x3e, 0x5d, 0x03, 0x49,
+ 0xe6, 0x10, 0x80, 0x28, 0x8e, 0xb0, 0x45, 0x6f, 0x42, 0xa1, 0xa5, 0x79, 0x5a, 0x65, 0xe4, 0x9c,
+ 0x32, 0x53, 0x9e, 0xbf, 0x94, 0xca, 0x5e, 0xda, 0xad, 0x8e, 0xb5, 0x07, 0xcb, 0xef, 0x7a, 0xc4,
+ 0xa2, 0x8c, 0x79, 0x89, 0xed, 0x6c, 0x49, 0xf3, 0x34, 0xcc, 0x99, 0xa0, 0x0d, 0x28, 0x5b, 0xc4,
+ 0x7b, 0x60, 0xbb, 0x3b, 0x0c, 0x58, 0x29, 0x72, 0x9e, 0x51, 0x95, 0xfb, 0x23, 0xb3, 0xbe, 0x2a,
+ 0x09, 0xf8, 0x9e, 0x19, 0x59, 0xe3, 0x44, 0xaf, 0x5b, 0x2b, 0xaf, 0x86, 0x7c, 0x70, 0x94, 0xa9,
+ 0xfa, 0x47, 0x05, 0x26, 0xa5, 0x87, 0x0c, 0xdb, 0xc2, 0x84, 0x76, 0x4c, 0x0f, 0x7d, 0x17, 0x46,
+ 0x85, 0xd1, 0x28, 0xf7, 0x4e, 0x79, 0xfe, 0x95, 0x83, 0x85, 0x0a, 0x69, 0x49, 0x36, 0x8d, 0x13,
+ 0xd2, 0x58, 0xa3, 0x62, 0x9d, 0x62, 0x9f, 0x2b, 0xba, 0x0b, 0xc7, 0x2d, 0xbb, 0x45, 0x9a, 0xc4,
+ 0x24, 0xba, 0x67, 0xbb, 0xdc, 0x73, 0xe5, 0xf9, 0x73, 0x51, 0x29, 0x2c, 0x4f, 0x98, 0xed, 0x57,
+ 0x23, 0x78, 0x8d, 0xc9, 0x5e, 0xb7, 0x76, 0x3c, 0x0a, 0xc1, 0x31, 0x3e, 0xea, 0xdf, 0x8a, 0x50,
+ 0x6e, 0x68, 0xd4, 0xd0, 0x85, 0x44, 0xf4, 0x43, 0x00, 0xcd, 0xf3, 0x5c, 0x63, 0xa3, 0xe3, 0xf1,
+ 0xbd, 0x30, 0x9f, 0x7f, 0xf9, 0xe0, 0xbd, 0x44, 0xc8, 0xeb, 0x0b, 0x01, 0xed, 0xb2, 0xe5, 0xb9,
+ 0x7b, 0x8d, 0x0b, 0xbe, 0xf7, 0xc3, 0x85, 0x1f, 0xfd, 0xbd, 0x36, 0x7e, 0xa7, 0xa3, 0x99, 0xc6,
+ 0xa6, 0x41, 0x5a, 0xab, 0x5a, 0x9b, 0xe0, 0x88, 0x44, 0xb4, 0x0b, 0x25, 0x5d, 0x73, 0x34, 0xdd,
+ 0xf0, 0xf6, 0x2a, 0x39, 0x2e, 0xfd, 0xb5, 0xec, 0xd2, 0x17, 0x25, 0xa5, 0x90, 0x7d, 0x5e, 0xca,
+ 0x2e, 0xf9, 0xe0, 0x7e, 0xc9, 0x81, 0x2c, 0xf4, 0x03, 0x98, 0xd4, 0x6d, 0x8b, 0x76, 0xda, 0x84,
+ 0x2e, 0xda, 0x1d, 0xcb, 0x23, 0x2e, 0xad, 0xe4, 0xb9, 0xfc, 0x57, 0xb3, 0x78, 0x52, 0xd2, 0x2c,
+ 0x72, 0x16, 0x0e, 0x0f, 0xfc, 0x8a, 0x14, 0x3f, 0xb9, 0x98, 0xe0, 0x8b, 0xfb, 0x24, 0xa1, 0x19,
+ 0x28, 0x31, 0xaf, 0x30, 0x9d, 0x2a, 0x05, 0x91, 0xb7, 0x4c, 0xf1, 0x55, 0x09, 0xc3, 0xc1, 0x6a,
+ 0x5f, 0x1c, 0x8c, 0x3c, 0x9d, 0x38, 0x60, 0x1a, 0x68, 0xa6, 0xc9, 0x10, 0x28, 0x4f, 0x9b, 0x92,
+ 0xd0, 0x60, 0x41, 0xc2, 0x70, 0xb0, 0x8a, 0xee, 0x40, 0xd1, 0xd3, 0x0c, 0xcb, 0xa3, 0x95, 0x51,
+ 0x6e, 0x9f, 0x8b, 0x59, 0xec, 0xb3, 0xce, 0x28, 0xc2, 0x42, 0xc3, 0xff, 0x52, 0x2c, 0x19, 0x4d,
+ 0x9b, 0x70, 0x22, 0x11, 0x38, 0x68, 0x12, 0xf2, 0x3b, 0x64, 0x4f, 0x94, 0x3a, 0xcc, 0x7e, 0xa2,
+ 0x45, 0x18, 0xd9, 0xd5, 0xcc, 0x0e, 0xe1, 0x85, 0x2d, 0x5e, 0x29, 0xd2, 0x13, 0xcc, 0xe7, 0x8a,
+ 0x05, 0xed, 0x57, 0x72, 0xaf, 0x2b, 0xd3, 0x3b, 0x30, 0x1e, 0x0b, 0x94, 0x01, 0xb2, 0x96, 0xe2,
+ 0xb2, 0xea, 0x07, 0x15, 0xbd, 0x50, 0xf8, 0x9d, 0x8e, 0x66, 0x79, 0x86, 0xb7, 0x17, 0x11, 0xa6,
+ 0x5e, 0x87, 0xa9, 0xc5, 0xe5, 0x9b, 0xb2, 0x90, 0xfb, 0xc6, 0x9e, 0x07, 0x20, 0xef, 0x3a, 0x2e,
+ 0xa1, 0xac, 0x88, 0xc9, 0x72, 0x1e, 0xd4, 0xc9, 0xe5, 0x60, 0x05, 0x47, 0xb0, 0xd4, 0xfb, 0x30,
+ 0x2a, 0xc3, 0x05, 0x35, 0x7d, 0xed, 0x94, 0xc3, 0x68, 0xd7, 0x18, 0x97, 0x92, 0x46, 0xee, 0x32,
+ 0x26, 0x52, 0x59, 0xf5, 0x3f, 0x0a, 0x80, 0x14, 0xd0, 0x24, 0x1e, 0xeb, 0x22, 0x16, 0x8b, 0x46,
+ 0x25, 0xde, 0x45, 0x78, 0x34, 0xf2, 0x15, 0xd4, 0x82, 0x92, 0xee, 0x67, 0x4a, 0x2e, 0x4b, 0xa6,
+ 0x84, 0xdc, 0xfd, 0x9f, 0xb2, 0x48, 0x4c, 0x06, 0x89, 0xea, 0x67, 0x48, 0xc0, 0x79, 0x7a, 0x03,
+ 0xc6, 0x63, 0xc8, 0x03, 0x9c, 0x75, 0x35, 0xee, 0xac, 0x2f, 0x64, 0xd2, 0x22, 0xea, 0xa3, 0x5d,
+ 0x90, 0x9d, 0x2f, 0xc3, 0xae, 0x6f, 0xc0, 0xc8, 0x06, 0xab, 0x38, 0x52, 0xd8, 0xc5, 0xcc, 0xc5,
+ 0xa9, 0x31, 0xc6, 0x4c, 0xce, 0x01, 0x58, 0xb0, 0x50, 0xdf, 0xcf, 0xc1, 0x0b, 0xc9, 0x46, 0xb0,
+ 0x68, 0x5b, 0x9b, 0xc6, 0x56, 0xc7, 0xe5, 0x7f, 0xd0, 0xd7, 0xa1, 0x28, 0x58, 0x4a, 0x8d, 0x66,
+ 0xfc, 0x04, 0x6a, 0x72, 0xe8, 0x7e, 0xb7, 0x76, 0x3a, 0x49, 0x2a, 0x56, 0xb0, 0xa4, 0x63, 0x79,
+ 0xed, 0x92, 0x77, 0x3a, 0x84, 0x7a, 0xc2, 0x4b, 0xb2, 0xb2, 0x60, 0x09, 0xc3, 0xc1, 0x2a, 0x7a,
+ 0x4f, 0x81, 0x93, 0x2d, 0x59, 0xcc, 0x22, 0x3a, 0xc8, 0x4e, 0x73, 0x39, 0x5b, 0x15, 0x8c, 0x10,
+ 0x36, 0x3e, 0x27, 0x95, 0x3d, 0x39, 0x60, 0x11, 0x0f, 0x12, 0xa5, 0xfe, 0x4b, 0x81, 0xd3, 0x83,
+ 0x3b, 0x23, 0xda, 0x84, 0x51, 0x97, 0xff, 0xf2, 0x9b, 0xd2, 0xd5, 0x2c, 0x0a, 0xc9, 0x6d, 0xa6,
+ 0xf7, 0x59, 0xf1, 0x9f, 0x62, 0x9f, 0x39, 0xd2, 0xa1, 0xa8, 0x73, 0x9d, 0x64, 0x4c, 0x5f, 0x1d,
+ 0xae, 0x8f, 0xc7, 0x2d, 0x10, 0xd4, 0x3b, 0x01, 0xc6, 0x92, 0xb5, 0xfa, 0x73, 0x05, 0x4e, 0x24,
+ 0x0a, 0x14, 0xaa, 0x42, 0xde, 0xb0, 0x3c, 0x1e, 0x56, 0x79, 0xe1, 0xa3, 0x15, 0xcb, 0x13, 0x19,
+ 0xca, 0x16, 0xd0, 0x79, 0x28, 0x6c, 0xb0, 0xb1, 0x2e, 0xcf, 0x8b, 0xf3, 0x78, 0xaf, 0x5b, 0x1b,
+ 0x6b, 0xd8, 0xb6, 0x29, 0x30, 0xf8, 0x12, 0xfa, 0x12, 0x14, 0xa9, 0xe7, 0x1a, 0xd6, 0x96, 0xec,
+ 0x21, 0x7c, 0x8e, 0x69, 0x72, 0x88, 0x40, 0x93, 0xcb, 0xe8, 0x45, 0x18, 0xdd, 0x25, 0x2e, 0x2f,
+ 0x3e, 0x23, 0x1c, 0x93, 0x77, 0x87, 0xbb, 0x02, 0x24, 0x50, 0x7d, 0x04, 0xf5, 0x97, 0x39, 0x28,
+ 0x4b, 0x07, 0x9a, 0x9a, 0xd1, 0x46, 0xf7, 0x22, 0x01, 0x25, 0x3c, 0xf1, 0xd2, 0x10, 0x9e, 0x08,
+ 0x73, 0x7d, 0x40, 0x04, 0x12, 0x28, 0xb3, 0xce, 0xe8, 0xb9, 0xa2, 0xbd, 0x08, 0x07, 0xd4, 0x33,
+ 0x06, 0x9e, 0x24, 0x6b, 0x9c, 0x94, 0x02, 0xca, 0x21, 0x8c, 0xe2, 0x28, 0x5f, 0x74, 0x3f, 0x70,
+ 0xf1, 0x30, 0x0d, 0x9e, 0x6d, 0x3e, 0x9b, 0x77, 0x3f, 0x52, 0xa0, 0x92, 0x46, 0x14, 0xcb, 0x47,
+ 0xe5, 0x50, 0xf9, 0x98, 0x3b, 0xba, 0x7c, 0xfc, 0xad, 0x12, 0xf1, 0x3d, 0xa5, 0xe8, 0x7b, 0x50,
+ 0x62, 0x03, 0x3e, 0x9f, 0xd7, 0x45, 0xef, 0x79, 0x39, 0xdb, 0x71, 0xe0, 0xf6, 0xc6, 0xf7, 0x89,
+ 0xee, 0xdd, 0x22, 0x9e, 0x16, 0xf6, 0xb9, 0x10, 0x86, 0x03, 0xae, 0xe8, 0x36, 0x14, 0xa8, 0x43,
+ 0xf4, 0x61, 0x7a, 0x3c, 0x57, 0xad, 0xe9, 0x10, 0x3d, 0xac, 0xd7, 0xec, 0x1f, 0xe6, 0x8c, 0xd4,
+ 0x9f, 0x46, 0x9d, 0x41, 0x69, 0xdc, 0x19, 0x69, 0x26, 0x56, 0x8e, 0xce, 0xc4, 0xbf, 0x09, 0x4a,
+ 0x01, 0xd7, 0xef, 0xa6, 0x41, 0x3d, 0xf4, 0x76, 0x9f, 0x99, 0xeb, 0xd9, 0xcc, 0xcc, 0xa8, 0xb9,
+ 0x91, 0x83, 0x2c, 0xf3, 0x21, 0x11, 0x13, 0xaf, 0xc2, 0x88, 0xe1, 0x91, 0xb6, 0x9f, 0x5f, 0x17,
+ 0x33, 0xdb, 0x38, 0x1c, 0x1c, 0x56, 0x18, 0x3d, 0x16, 0x6c, 0xd4, 0x47, 0xf1, 0x1d, 0x30, 0xdb,
+ 0xa3, 0xef, 0xc0, 0x18, 0x95, 0xc3, 0x8e, 0x5f, 0x25, 0x66, 0xb3, 0xc8, 0x09, 0xc6, 0xd5, 0x29,
+ 0x29, 0x6a, 0xcc, 0x87, 0x50, 0x1c, 0x72, 0x8c, 0x64, 0x70, 0x6e, 0xa8, 0x0c, 0x4e, 0xf8, 0x3f,
+ 0x35, 0x83, 0x5d, 0x18, 0xe4, 0x40, 0xf4, 0x6d, 0x28, 0xda, 0x8e, 0xf6, 0x4e, 0x30, 0x78, 0x3d,
+ 0xe1, 0x64, 0x72, 0x9b, 0xe3, 0x0e, 0x0a, 0x13, 0x60, 0x32, 0xc5, 0x32, 0x96, 0x2c, 0xd5, 0xf7,
+ 0x15, 0x98, 0x4c, 0x16, 0xb3, 0x21, 0xaa, 0xc5, 0x1a, 0x4c, 0xb4, 0x35, 0x4f, 0xdf, 0x0e, 0x1a,
+ 0x8a, 0x3c, 0xff, 0xcf, 0xf4, 0xba, 0xb5, 0x89, 0x5b, 0xb1, 0x95, 0xfd, 0x6e, 0x0d, 0x5d, 0xeb,
+ 0x98, 0xe6, 0x5e, 0xfc, 0x2c, 0x94, 0xa0, 0x57, 0x3f, 0xcc, 0x05, 0x99, 0xd3, 0x77, 0xb8, 0x61,
+ 0x13, 0xac, 0x1e, 0x8c, 0x73, 0xc9, 0x09, 0x36, 0x1c, 0xf4, 0x70, 0x04, 0x0b, 0xb9, 0x7d, 0x03,
+ 0xe3, 0xd2, 0xe1, 0x8e, 0x56, 0xcf, 0xd9, 0xf8, 0xf8, 0xd7, 0x02, 0x8c, 0xc7, 0x9a, 0x5c, 0x86,
+ 0x31, 0x72, 0x01, 0x4e, 0xb4, 0xc2, 0xa8, 0xe4, 0xe7, 0x3e, 0xe1, 0xaf, 0xcf, 0x4a, 0xe4, 0x68,
+ 0x4a, 0x71, 0xba, 0x24, 0x7e, 0x3c, 0xc7, 0xf2, 0x4f, 0x3d, 0xc7, 0xee, 0xc2, 0x84, 0x16, 0x8c,
+ 0x35, 0xb7, 0xec, 0x96, 0x7f, 0x30, 0xad, 0x4b, 0xaa, 0x89, 0x85, 0xd8, 0xea, 0x7e, 0xb7, 0xf6,
+ 0x99, 0xe4, 0x30, 0xc4, 0xe0, 0x38, 0xc1, 0x05, 0x5d, 0x80, 0x11, 0xee, 0x1d, 0x3e, 0x79, 0xe4,
+ 0xc3, 0x9a, 0xc2, 0x0d, 0x8b, 0xc5, 0x1a, 0xba, 0x0c, 0x65, 0xad, 0xd5, 0x36, 0xac, 0x05, 0x5d,
+ 0x27, 0xd4, 0x3f, 0x90, 0xf2, 0x71, 0x66, 0x21, 0x04, 0xe3, 0x28, 0x0e, 0xb2, 0x60, 0x62, 0xd3,
+ 0x70, 0xa9, 0xb7, 0xb0, 0xab, 0x19, 0xa6, 0xb6, 0x61, 0x12, 0x79, 0x3c, 0xcd, 0x34, 0x3f, 0x34,
+ 0x3b, 0x1b, 0xfe, 0x80, 0x72, 0xda, 0xdf, 0xdf, 0xb5, 0x18, 0x37, 0x9c, 0xe0, 0xce, 0x86, 0x15,
+ 0xcf, 0x36, 0x89, 0xc8, 0x68, 0x5a, 0x29, 0x65, 0x17, 0xb6, 0x1e, 0x90, 0x85, 0xc3, 0x4a, 0x08,
+ 0xa3, 0x38, 0xca, 0x57, 0xfd, 0x4b, 0x70, 0x46, 0x48, 0x99, 0x65, 0xd1, 0x45, 0x36, 0x19, 0xf3,
+ 0x25, 0x19, 0x6f, 0x91, 0xe1, 0x96, 0x83, 0xb1, 0xbf, 0x1e, 0xb9, 0x42, 0xcc, 0x65, 0xba, 0x42,
+ 0xcc, 0x67, 0xb8, 0x42, 0x2c, 0x1c, 0x78, 0x85, 0x98, 0x70, 0xe4, 0x48, 0x06, 0x47, 0x26, 0x0c,
+ 0x5b, 0x7c, 0x46, 0x86, 0x7d, 0x1b, 0x26, 0x12, 0xa7, 0xf2, 0x1b, 0x90, 0xd7, 0x89, 0x29, 0x6b,
+ 0xfb, 0x13, 0x2e, 0x0d, 0xfb, 0xce, 0xf4, 0x8d, 0xd1, 0x5e, 0xb7, 0x96, 0x5f, 0x5c, 0xbe, 0x89,
+ 0x19, 0x13, 0xf5, 0xd7, 0x79, 0xbf, 0x9a, 0x87, 0xa1, 0xf5, 0x69, 0x59, 0xf8, 0x5f, 0xcb, 0x42,
+ 0x22, 0x34, 0x46, 0x9f, 0x51, 0x68, 0xfc, 0x3b, 0x18, 0x7b, 0xf9, 0x3d, 0x15, 0x7a, 0x21, 0xd2,
+ 0x33, 0x1a, 0x65, 0x49, 0x9e, 0x7f, 0x93, 0xec, 0x89, 0x06, 0x72, 0x21, 0xda, 0x40, 0xc6, 0x06,
+ 0x5f, 0xaf, 0xa0, 0xab, 0x50, 0x24, 0x9b, 0x9b, 0x44, 0xf7, 0x64, 0x52, 0xf9, 0x17, 0xa3, 0xc5,
+ 0x65, 0x0e, 0xdd, 0xef, 0xd6, 0xa6, 0x22, 0x22, 0x05, 0x10, 0x4b, 0x12, 0xf4, 0x0d, 0x18, 0xf3,
+ 0x8c, 0x36, 0x59, 0x68, 0xb5, 0x48, 0x8b, 0xdb, 0xbb, 0x3c, 0xff, 0x62, 0xb6, 0x89, 0x70, 0xdd,
+ 0x68, 0x13, 0x71, 0x58, 0x5c, 0xf7, 0x19, 0xe0, 0x90, 0x97, 0xfa, 0x30, 0x98, 0xdd, 0xb8, 0x58,
+ 0xdc, 0x31, 0xc9, 0x11, 0x0c, 0xf9, 0xcd, 0xd8, 0x90, 0x7f, 0x39, 0xf3, 0xfd, 0x21, 0x53, 0x2f,
+ 0x75, 0xd0, 0xff, 0x48, 0xf1, 0x87, 0xb6, 0x00, 0xf7, 0x08, 0x86, 0x69, 0x1c, 0x1f, 0xa6, 0x2f,
+ 0x0d, 0xb5, 0x97, 0x94, 0x81, 0xfa, 0xe3, 0xfe, 0x9d, 0xf0, 0xa1, 0xba, 0x0d, 0x13, 0xad, 0x58,
+ 0xaa, 0x0e, 0x73, 0x4e, 0xe1, 0xac, 0x82, 0x1c, 0x47, 0x2c, 0x53, 0xe3, 0x79, 0x8f, 0x13, 0xcc,
+ 0xd9, 0x39, 0x81, 0x5f, 0xcf, 0x66, 0xbb, 0xe9, 0x8a, 0x5e, 0xf3, 0x06, 0xdb, 0x12, 0xfa, 0x0b,
+ 0x36, 0xea, 0x4f, 0x72, 0xb1, 0x6d, 0x05, 0x72, 0xbe, 0xd6, 0x5f, 0xf3, 0x44, 0xa6, 0x9d, 0xcc,
+ 0x54, 0xef, 0xd4, 0x44, 0x4f, 0x83, 0x01, 0xfd, 0xec, 0x6c, 0xac, 0x9f, 0x95, 0x12, 0xbd, 0x4c,
+ 0x4d, 0xf4, 0x32, 0x18, 0xd0, 0xc7, 0x62, 0x55, 0x75, 0xe4, 0x69, 0x57, 0x55, 0xf5, 0x67, 0x39,
+ 0xbf, 0x5d, 0x84, 0x45, 0xe9, 0x49, 0x65, 0xe7, 0x0d, 0x28, 0xd9, 0x0e, 0xc3, 0xb5, 0xfd, 0xad,
+ 0xcf, 0xfa, 0x81, 0x7a, 0x5b, 0xc2, 0xf7, 0xbb, 0xb5, 0x4a, 0x92, 0xad, 0xbf, 0x86, 0x03, 0xea,
+ 0xb0, 0x80, 0xe5, 0x33, 0x15, 0xb0, 0xc2, 0xf0, 0x05, 0x6c, 0x11, 0xa6, 0xc2, 0x02, 0xdb, 0x24,
+ 0xba, 0x6d, 0xb5, 0xa8, 0xac, 0xf4, 0xa7, 0x7a, 0xdd, 0xda, 0xd4, 0x7a, 0x72, 0x11, 0xf7, 0xe3,
+ 0xab, 0xbf, 0x50, 0x60, 0xaa, 0xef, 0x63, 0x1d, 0xba, 0x0a, 0xe3, 0x06, 0x9b, 0xc8, 0x37, 0x35,
+ 0x9d, 0x44, 0x82, 0xe7, 0x94, 0x54, 0x6f, 0x7c, 0x25, 0xba, 0x88, 0xe3, 0xb8, 0xe8, 0x0c, 0xe4,
+ 0x0d, 0xc7, 0xbf, 0x18, 0xe5, 0x1d, 0x7c, 0x65, 0x8d, 0x62, 0x06, 0x63, 0xad, 0x78, 0x5b, 0x73,
+ 0x5b, 0x0f, 0x34, 0x97, 0xd5, 0x4a, 0x97, 0x4d, 0x2f, 0xf9, 0x78, 0x2b, 0x7e, 0x23, 0xbe, 0x8c,
+ 0x93, 0xf8, 0xea, 0x87, 0x0a, 0x9c, 0x49, 0x3d, 0x04, 0x66, 0xfe, 0x9e, 0xab, 0x01, 0x38, 0x9a,
+ 0xab, 0xb5, 0x89, 0x3c, 0x38, 0x1d, 0xe2, 0x33, 0x69, 0x50, 0x8e, 0xd7, 0x02, 0x46, 0x38, 0xc2,
+ 0x54, 0xfd, 0x20, 0x07, 0xe3, 0x58, 0x46, 0xb0, 0xb8, 0xe5, 0x7b, 0xf6, 0x4d, 0xe0, 0x4e, 0xac,
+ 0x09, 0x3c, 0x61, 0xdc, 0x8a, 0x29, 0x97, 0xd6, 0x02, 0xd0, 0x3d, 0x28, 0x52, 0xfe, 0xad, 0x3c,
+ 0xdb, 0x9d, 0x75, 0x9c, 0x29, 0x27, 0x0c, 0x9d, 0x20, 0xfe, 0x63, 0xc9, 0x50, 0xed, 0x29, 0x50,
+ 0x8d, 0xe1, 0xcb, 0x8f, 0x7a, 0x2e, 0x26, 0x9b, 0xc4, 0x25, 0x96, 0x4e, 0xd0, 0x2c, 0x94, 0x34,
+ 0xc7, 0xb8, 0xee, 0xda, 0x1d, 0x47, 0x7a, 0x34, 0x68, 0x1c, 0x0b, 0x6b, 0x2b, 0x1c, 0x8e, 0x03,
+ 0x0c, 0x86, 0xed, 0x6b, 0x24, 0xe3, 0x2a, 0x72, 0x33, 0x2a, 0xe0, 0x38, 0xc0, 0x08, 0x26, 0xc7,
+ 0x42, 0xea, 0xe4, 0xd8, 0x80, 0x7c, 0xc7, 0x68, 0xc9, 0xeb, 0xdc, 0x97, 0xfd, 0x62, 0xf1, 0xd6,
+ 0xca, 0xd2, 0x7e, 0xb7, 0x76, 0x3e, 0xed, 0x2d, 0x82, 0xb7, 0xe7, 0x10, 0x5a, 0x7f, 0x6b, 0x65,
+ 0x09, 0x33, 0x62, 0xf5, 0x77, 0x0a, 0x4c, 0xc5, 0x36, 0x79, 0x04, 0x0d, 0x74, 0x2d, 0xde, 0x40,
+ 0x5f, 0x1a, 0xc2, 0x65, 0x29, 0xed, 0xd3, 0x48, 0x6c, 0x82, 0xf7, 0xce, 0xf5, 0xe4, 0xf7, 0xf9,
+ 0x8b, 0x99, 0x2f, 0x7d, 0xd3, 0x3f, 0xca, 0xab, 0x7f, 0xc8, 0xc1, 0xc9, 0x01, 0x51, 0x84, 0xee,
+ 0x03, 0x84, 0xe3, 0xed, 0x00, 0xa3, 0x0d, 0x10, 0xd8, 0xf7, 0x89, 0x62, 0x82, 0x7f, 0x35, 0x0f,
+ 0xa1, 0x11, 0x8e, 0x88, 0x42, 0xd9, 0x25, 0x94, 0xb8, 0xbb, 0xa4, 0x75, 0x8d, 0x57, 0x7f, 0x66,
+ 0xba, 0xaf, 0x0e, 0x61, 0xba, 0xbe, 0xe8, 0x0d, 0xa7, 0x62, 0x1c, 0x32, 0xc6, 0x51, 0x29, 0xe8,
+ 0x7e, 0x68, 0x42, 0xf1, 0x14, 0xe4, 0x4a, 0xa6, 0x1d, 0xc5, 0x5f, 0xb1, 0x1c, 0x60, 0xcc, 0x8f,
+ 0x15, 0x38, 0x15, 0x53, 0x72, 0x9d, 0xb4, 0x1d, 0x53, 0xf3, 0x8e, 0x62, 0x22, 0xbd, 0x17, 0x2b,
+ 0x46, 0xaf, 0x0d, 0x61, 0x49, 0x5f, 0xc9, 0xd4, 0xb9, 0xf4, 0xcf, 0x0a, 0x9c, 0x19, 0x48, 0x71,
+ 0x04, 0xc9, 0xf5, 0xcd, 0x78, 0x72, 0x5d, 0x39, 0xc4, 0xbe, 0xd2, 0x2f, 0x7d, 0xcf, 0xa4, 0xda,
+ 0xe1, 0xff, 0xb2, 0x7b, 0xa8, 0xbf, 0x52, 0xe0, 0xb8, 0x8f, 0xc9, 0xa6, 0xc3, 0x0c, 0xc7, 0xf5,
+ 0x79, 0x00, 0xf9, 0x7e, 0xcb, 0xff, 0x30, 0x93, 0x0f, 0xf5, 0xbe, 0x1e, 0xac, 0xe0, 0x08, 0x16,
+ 0xba, 0x01, 0xc8, 0xd7, 0xb0, 0x69, 0xfa, 0xd7, 0x9b, 0xbc, 0x05, 0xe4, 0x1b, 0xd3, 0x92, 0x16,
+ 0xe1, 0x3e, 0x0c, 0x3c, 0x80, 0x4a, 0xfd, 0xbd, 0x12, 0xf6, 0x6d, 0x0e, 0x7e, 0x5e, 0x2d, 0xcf,
+ 0x95, 0x4b, 0xb5, 0x7c, 0xb4, 0xef, 0x70, 0xcc, 0xe7, 0xb6, 0xef, 0x70, 0xed, 0x52, 0x52, 0xe2,
+ 0x4f, 0x85, 0xc4, 0x2e, 0x78, 0x2a, 0x64, 0x9d, 0xf2, 0x6e, 0x46, 0x5e, 0xed, 0xc5, 0x4f, 0xf7,
+ 0x07, 0xa8, 0xc3, 0xc2, 0x74, 0xe0, 0xf5, 0xdc, 0x6c, 0xe4, 0x3d, 0x51, 0x62, 0xba, 0xc8, 0xf0,
+ 0xa6, 0xa8, 0xf0, 0x94, 0xde, 0x14, 0xcd, 0x46, 0xde, 0x14, 0x89, 0x9b, 0xbf, 0x70, 0x22, 0xea,
+ 0x7f, 0x57, 0x74, 0x3b, 0xec, 0x2f, 0xe2, 0xce, 0xef, 0xf3, 0x59, 0x5a, 0xf4, 0x01, 0x4f, 0xe6,
+ 0x30, 0x9c, 0x76, 0x88, 0x2b, 0xc0, 0xa1, 0x96, 0x2c, 0x53, 0x47, 0xb9, 0x32, 0xd3, 0xbd, 0x6e,
+ 0xed, 0xf4, 0xda, 0x40, 0x0c, 0x9c, 0x42, 0x89, 0xb6, 0x61, 0x82, 0x6e, 0x6b, 0x2e, 0x69, 0x05,
+ 0x8f, 0xc4, 0xc4, 0xc5, 0xef, 0x4c, 0xd6, 0xa7, 0x2f, 0xe1, 0xfd, 0x72, 0x33, 0xc6, 0x07, 0x27,
+ 0xf8, 0x36, 0x1a, 0x0f, 0x1f, 0x57, 0x8f, 0x3d, 0x7a, 0x5c, 0x3d, 0xf6, 0xc9, 0xe3, 0xea, 0xb1,
+ 0xf7, 0x7a, 0x55, 0xe5, 0x61, 0xaf, 0xaa, 0x3c, 0xea, 0x55, 0x95, 0x4f, 0x7a, 0x55, 0xe5, 0x1f,
+ 0xbd, 0xaa, 0xf2, 0xe3, 0x7f, 0x56, 0x8f, 0x7d, 0xeb, 0xec, 0x41, 0x4f, 0x74, 0xff, 0x1b, 0x00,
+ 0x00, 0xff, 0xff, 0xa5, 0x57, 0x37, 0xad, 0xc1, 0x2b, 0x00, 0x00,
}
func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
@@ -1178,16 +1509,18 @@ func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x32
}
- {
- size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if m.Data != nil {
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
}
- i--
- dAtA[i] = 0x2a
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -1285,17 +1618,10 @@ func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- if len(m.Capacity) > 0 {
- keysForCapacity := make([]string, 0, len(m.Capacity))
- for k := range m.Capacity {
- keysForCapacity = append(keysForCapacity, string(k))
- }
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
- for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
- v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
- baseI := i
+ if len(m.Taints) > 0 {
+ for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1303,21 +1629,85 @@ func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x12
- i -= len(keysForCapacity[iNdEx])
- copy(dAtA[i:], keysForCapacity[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
- i--
- dAtA[i] = 0xa
- i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x12
+ dAtA[i] = 0x3a
}
}
- if len(m.Attributes) > 0 {
- keysForAttributes := make([]string, 0, len(m.Attributes))
- for k := range m.Attributes {
- keysForAttributes = append(keysForAttributes, string(k))
+ if m.AllNodes != nil {
+ i--
+ if *m.AllNodes {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.NodeSelector != nil {
+ {
+ size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NodeName != nil {
+ i -= len(*m.NodeName)
+ copy(dAtA[i:], *m.NodeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.ConsumesCounters) > 0 {
+ for iNdEx := len(m.ConsumesCounters) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ConsumesCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Capacity) > 0 {
+ keysForCapacity := make([]string, 0, len(m.Capacity))
+ for k := range m.Capacity {
+ keysForCapacity = append(keysForCapacity, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForCapacity[iNdEx])
+ copy(dAtA[i:], keysForCapacity[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Attributes) > 0 {
+ keysForAttributes := make([]string, 0, len(m.Attributes))
+ for k := range m.Attributes {
+ keysForAttributes = append(keysForAttributes, string(k))
}
github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
@@ -1374,6 +1764,96 @@ func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *Counter) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Counter) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CounterSet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CounterSet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Counters) > 0 {
+ keysForCounters := make([]string, 0, len(m.Counters))
+ for k := range m.Counters {
+ keysForCounters = append(keysForCounters, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Counters[string(keysForCounters[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForCounters[iNdEx])
+ copy(dAtA[i:], keysForCounters[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *Device) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1919,6 +2399,63 @@ func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *DeviceCounterConsumption) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceCounterConsumption) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Counters) > 0 {
+ keysForCounters := make([]string, 0, len(m.Counters))
+ for k := range m.Counters {
+ keysForCounters = append(keysForCounters, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Counters[string(keysForCounters[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForCounters[iNdEx])
+ copy(dAtA[i:], keysForCounters[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.CounterSet)
+ copy(dAtA[i:], m.CounterSet)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CounterSet)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *DeviceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1939,6 +2476,34 @@ func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if len(m.Tolerations) > 0 {
+ for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.FirstAvailable) > 0 {
+ for iNdEx := len(m.FirstAvailable) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.FirstAvailable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
if m.AdminAccess != nil {
i--
if *m.AdminAccess {
@@ -2004,6 +2569,20 @@ func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if len(m.Tolerations) > 0 {
+ for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
if m.AdminAccess != nil {
i--
if *m.AdminAccess {
@@ -2072,7 +2651,7 @@ func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
+func (m *DeviceSubRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2082,77 +2661,66 @@ func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceSubRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceSubRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i -= len(m.HardwareAddress)
- copy(dAtA[i:], m.HardwareAddress)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress)))
- i--
- dAtA[i] = 0x1a
- if len(m.IPs) > 0 {
- for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.IPs[iNdEx])
- copy(dAtA[i:], m.IPs[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx])))
+ if len(m.Tolerations) > 0 {
+ for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x12
+ dAtA[i] = 0x3a
}
}
- i -= len(m.InterfaceName)
- copy(dAtA[i:], m.InterfaceName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName)))
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ dAtA[i] = 0x28
+ i -= len(m.AllocationMode)
+ copy(dAtA[i:], m.AllocationMode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
+ i--
+ dAtA[i] = 0x22
+ if len(m.Selectors) > 0 {
+ for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i -= len(m.DeviceClassName)
+ copy(dAtA[i:], m.DeviceClassName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
i--
dAtA[i] = 0x12
- i -= len(m.Driver)
- copy(dAtA[i:], m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
+func (m *DeviceTaint) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2162,50 +2730,47 @@ func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if m.TimeAdded != nil {
+ {
+ size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
}
+ i -= len(m.Effect)
+ copy(dAtA[i:], m.Effect)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
i--
dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
i--
dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
+func (m *DeviceTaintRule) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2215,40 +2780,40 @@ func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceTaintRule) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceTaintRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i -= len(m.UID)
- copy(dAtA[i:], m.UID)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
- i--
- dAtA[i] = 0x2a
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x22
- i -= len(m.Resource)
- copy(dAtA[i:], m.Resource)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x1a
- i -= len(m.APIGroup)
- copy(dAtA[i:], m.APIGroup)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
+func (m *DeviceTaintRuleList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2258,12 +2823,12 @@ func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceTaintRuleList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceTaintRuleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2295,7 +2860,7 @@ func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
+func (m *DeviceTaintRuleSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2305,18 +2870,18 @@ func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceTaintRuleSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceTaintRuleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
- size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Taint.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2324,11 +2889,23 @@ func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0xa
+ dAtA[i] = 0x12
+ if m.DeviceSelector != nil {
+ {
+ size, err := m.DeviceSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
return len(dAtA) - i, nil
}
-func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
+func (m *DeviceTaintSelector) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2338,20 +2915,20 @@ func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceTaintSelector) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Devices) > 0 {
- for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
+ if len(m.Selectors) > 0 {
+ for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2359,39 +2936,41 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x22
+ dAtA[i] = 0x2a
}
}
- if len(m.ReservedFor) > 0 {
- for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
+ if m.Device != nil {
+ i -= len(*m.Device)
+ copy(dAtA[i:], *m.Device)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Device)))
+ i--
+ dAtA[i] = 0x22
}
- if m.Allocation != nil {
- {
- size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ if m.Pool != nil {
+ i -= len(*m.Pool)
+ copy(dAtA[i:], *m.Pool)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pool)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Driver != nil {
+ i -= len(*m.Driver)
+ copy(dAtA[i:], *m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Driver)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.DeviceClassName != nil {
+ i -= len(*m.DeviceClassName)
+ copy(dAtA[i:], *m.DeviceClassName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeviceClassName)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
+func (m *DeviceToleration) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2401,40 +2980,45 @@ func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceToleration) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceToleration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
+ if m.TolerationSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds))
+ i--
+ dAtA[i] = 0x28
}
+ i -= len(m.Effect)
+ copy(dAtA[i:], m.Effect)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Operator)
+ copy(dAtA[i:], m.Operator)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
i--
dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
+func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2444,44 +3028,39 @@ func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
+func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.HardwareAddress)
+ copy(dAtA[i:], m.HardwareAddress)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress)))
+ i--
+ dAtA[i] = 0x1a
+ if len(m.IPs) > 0 {
+ for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.IPs[iNdEx])
+ copy(dAtA[i:], m.IPs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx])))
i--
dAtA[i] = 0x12
}
}
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.InterfaceName)
+ copy(dAtA[i:], m.InterfaceName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
+func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2491,18 +3070,18 @@ func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2511,20 +3090,15 @@ func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro
}
i--
dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
+func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2534,31 +3108,50 @@ func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) {
+func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount))
- i--
- dAtA[i] = 0x18
- i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
- i--
- dAtA[i] = 0x10
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
+func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2568,40 +3161,40 @@ func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
+func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ dAtA[i] = 0x2a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Resource)
+ copy(dAtA[i:], m.Resource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.APIGroup)
+ copy(dAtA[i:], m.APIGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
+func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2611,12 +3204,12 @@ func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
+func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2648,7 +3241,7 @@ func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
+func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2658,12 +3251,45 @@ func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2679,20 +3305,26 @@ func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x32
+ dAtA[i] = 0x22
}
}
- i--
- if m.AllNodes {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
+ if len(m.ReservedFor) > 0 {
+ for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
}
- i--
- dAtA[i] = 0x28
- if m.NodeSelector != nil {
+ if m.Allocation != nil {
{
- size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2700,15 +3332,33 @@ func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x22
+ dAtA[i] = 0xa
}
- i -= len(m.NodeName)
- copy(dAtA[i:], m.NodeName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
- i--
- dAtA[i] = 0x1a
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
{
- size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2717,282 +3367,432 @@ func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
i--
dAtA[i] = 0x12
- i -= len(m.Driver)
- copy(dAtA[i:], m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
+func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- dAtA[offset] = uint8(v)
- return base
+ return dAtA[:n], nil
}
-func (m *AllocatedDeviceStatus) Size() (n int) {
- if m == nil {
- return 0
- }
+
+func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Pool)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Device)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
}
- l = m.Data.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if m.NetworkData != nil {
- l = m.NetworkData.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *AllocationResult) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Devices.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if m.NodeSelector != nil {
- l = m.NodeSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
+func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- return n
+ return dAtA[:n], nil
}
-func (m *BasicDevice) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Attributes) > 0 {
- for k, v := range m.Attributes {
- _ = k
- _ = v
- l = v.Size()
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if len(m.Capacity) > 0 {
- for k, v := range m.Capacity {
- _ = k
- _ = v
- l = v.Size()
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *CELDeviceSelector) Size() (n int) {
- if m == nil {
- return 0
+func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *Device) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- if m.Basic != nil {
- l = m.Basic.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
+func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *DeviceAllocationConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Source)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Requests) > 0 {
- for _, s := range m.Requests {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = m.DeviceConfiguration.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *DeviceAllocationResult) Size() (n int) {
- if m == nil {
- return 0
+func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Results) > 0 {
- for _, e := range m.Results {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if len(m.Config) > 0 {
- for _, e := range m.Config {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *DeviceAttribute) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.IntValue != nil {
- n += 1 + sovGenerated(uint64(*m.IntValue))
- }
- if m.BoolValue != nil {
- n += 2
- }
- if m.StringValue != nil {
- l = len(*m.StringValue)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.VersionValue != nil {
- l = len(*m.VersionValue)
- n += 1 + l + sovGenerated(uint64(l))
+func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- return n
+ return dAtA[:n], nil
}
-func (m *DeviceClaim) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Requests) > 0 {
- for _, e := range m.Requests {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if len(m.Constraints) > 0 {
- for _, e := range m.Constraints {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
}
- if len(m.Config) > 0 {
- for _, e := range m.Config {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *DeviceClaimConfiguration) Size() (n int) {
- if m == nil {
- return 0
+func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- if len(m.Requests) > 0 {
- for _, s := range m.Requests {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
+ return dAtA[:n], nil
+}
+
+func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.SharedCounters) > 0 {
+ for iNdEx := len(m.SharedCounters) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.SharedCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
}
}
- l = m.DeviceConfiguration.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ if m.PerDeviceNodeSelection != nil {
+ i--
+ if *m.PerDeviceNodeSelection {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.Devices) > 0 {
+ for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i--
+ if m.AllNodes {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ if m.NodeSelector != nil {
+ {
+ size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.NodeName)
+ copy(dAtA[i:], m.NodeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *DeviceClass) Size() (n int) {
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *AllocatedDeviceStatus) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.ObjectMeta.Size()
+ l = len(m.Driver)
n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
+ l = len(m.Pool)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Device)
n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Data != nil {
+ l = m.Data.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NetworkData != nil {
+ l = m.NetworkData.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
-func (m *DeviceClassConfiguration) Size() (n int) {
+func (m *AllocationResult) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.DeviceConfiguration.Size()
+ l = m.Devices.Size()
n += 1 + l + sovGenerated(uint64(l))
+ if m.NodeSelector != nil {
+ l = m.NodeSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
-func (m *DeviceClassList) Size() (n int) {
+func (m *BasicDevice) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Attributes) > 0 {
+ for k, v := range m.Attributes {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
- return n
-}
-
-func (m *DeviceClassSpec) Size() (n int) {
- if m == nil {
- return 0
+ if len(m.Capacity) > 0 {
+ for k, v := range m.Capacity {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
}
- var l int
- _ = l
- if len(m.Selectors) > 0 {
- for _, e := range m.Selectors {
+ if len(m.ConsumesCounters) > 0 {
+ for _, e := range m.ConsumesCounters {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
- if len(m.Config) > 0 {
- for _, e := range m.Config {
+ if m.NodeName != nil {
+ l = len(*m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NodeSelector != nil {
+ l = m.NodeSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.AllNodes != nil {
+ n += 2
+ }
+ if len(m.Taints) > 0 {
+ for _, e := range m.Taints {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
@@ -3000,39 +3800,29 @@ func (m *DeviceClassSpec) Size() (n int) {
return n
}
-func (m *DeviceConfiguration) Size() (n int) {
+func (m *CELDeviceSelector) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if m.Opaque != nil {
- l = m.Opaque.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *DeviceConstraint) Size() (n int) {
+func (m *Counter) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if len(m.Requests) > 0 {
- for _, s := range m.Requests {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if m.MatchAttribute != nil {
- l = len(*m.MatchAttribute)
- n += 1 + l + sovGenerated(uint64(l))
- }
+ l = m.Value.Size()
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *DeviceRequest) Size() (n int) {
+func (m *CounterSet) Size() (n int) {
if m == nil {
return 0
}
@@ -3040,89 +3830,141 @@ func (m *DeviceRequest) Size() (n int) {
_ = l
l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
- l = len(m.DeviceClassName)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Selectors) > 0 {
- for _, e := range m.Selectors {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Counters) > 0 {
+ for k, v := range m.Counters {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
- l = len(m.AllocationMode)
- n += 1 + l + sovGenerated(uint64(l))
- n += 1 + sovGenerated(uint64(m.Count))
- if m.AdminAccess != nil {
- n += 2
- }
return n
}
-func (m *DeviceRequestAllocationResult) Size() (n int) {
+func (m *Device) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Request)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Pool)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Device)
+ l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
- if m.AdminAccess != nil {
- n += 2
+ if m.Basic != nil {
+ l = m.Basic.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
return n
}
-func (m *DeviceSelector) Size() (n int) {
+func (m *DeviceAllocationConfiguration) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if m.CEL != nil {
- l = m.CEL.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Source)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Requests) > 0 {
+ for _, s := range m.Requests {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
+ l = m.DeviceConfiguration.Size()
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *NetworkDeviceData) Size() (n int) {
+func (m *DeviceAllocationResult) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.InterfaceName)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.IPs) > 0 {
- for _, s := range m.IPs {
- l = len(s)
+ if len(m.Results) > 0 {
+ for _, e := range m.Results {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for _, e := range m.Config {
+ l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
- l = len(m.HardwareAddress)
- n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *OpaqueDeviceConfiguration) Size() (n int) {
+func (m *DeviceAttribute) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Parameters.Size()
+ if m.IntValue != nil {
+ n += 1 + sovGenerated(uint64(*m.IntValue))
+ }
+ if m.BoolValue != nil {
+ n += 2
+ }
+ if m.StringValue != nil {
+ l = len(*m.StringValue)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.VersionValue != nil {
+ l = len(*m.VersionValue)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeviceClaim) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Requests) > 0 {
+ for _, e := range m.Requests {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Constraints) > 0 {
+ for _, e := range m.Constraints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for _, e := range m.Config {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeviceClaimConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Requests) > 0 {
+ for _, s := range m.Requests {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.DeviceConfiguration.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *ResourceClaim) Size() (n int) {
+func (m *DeviceClass) Size() (n int) {
if m == nil {
return 0
}
@@ -3132,29 +3974,21 @@ func (m *ResourceClaim) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *ResourceClaimConsumerReference) Size() (n int) {
+func (m *DeviceClassConfiguration) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.APIGroup)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Resource)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.UID)
+ l = m.DeviceConfiguration.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
-func (m *ResourceClaimList) Size() (n int) {
+func (m *DeviceClassList) Size() (n int) {
if m == nil {
return 0
}
@@ -3171,65 +4005,135 @@ func (m *ResourceClaimList) Size() (n int) {
return n
}
-func (m *ResourceClaimSpec) Size() (n int) {
+func (m *DeviceClassSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.Devices.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for _, e := range m.Config {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
-func (m *ResourceClaimStatus) Size() (n int) {
+func (m *DeviceConfiguration) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if m.Allocation != nil {
- l = m.Allocation.Size()
+ if m.Opaque != nil {
+ l = m.Opaque.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- if len(m.ReservedFor) > 0 {
- for _, e := range m.ReservedFor {
- l = e.Size()
+ return n
+}
+
+func (m *DeviceConstraint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Requests) > 0 {
+ for _, s := range m.Requests {
+ l = len(s)
n += 1 + l + sovGenerated(uint64(l))
}
}
- if len(m.Devices) > 0 {
- for _, e := range m.Devices {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.MatchAttribute != nil {
+ l = len(*m.MatchAttribute)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeviceCounterConsumption) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.CounterSet)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Counters) > 0 {
+ for k, v := range m.Counters {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
return n
}
-func (m *ResourceClaimTemplate) Size() (n int) {
+func (m *DeviceRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.ObjectMeta.Size()
+ l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
+ l = len(m.DeviceClassName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.AllocationMode)
n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Count))
+ if m.AdminAccess != nil {
+ n += 2
+ }
+ if len(m.FirstAvailable) > 0 {
+ for _, e := range m.FirstAvailable {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Tolerations) > 0 {
+ for _, e := range m.Tolerations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
-func (m *ResourceClaimTemplateList) Size() (n int) {
+func (m *DeviceRequestAllocationResult) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.ListMeta.Size()
+ l = len(m.Request)
n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Pool)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Device)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.AdminAccess != nil {
+ n += 2
+ }
+ if len(m.Tolerations) > 0 {
+ for _, e := range m.Tolerations {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
@@ -3237,33 +4141,67 @@ func (m *ResourceClaimTemplateList) Size() (n int) {
return n
}
-func (m *ResourceClaimTemplateSpec) Size() (n int) {
+func (m *DeviceSelector) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.ObjectMeta.Size()
+ if m.CEL != nil {
+ l = m.CEL.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeviceSubRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
+ l = len(m.DeviceClassName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.AllocationMode)
n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Count))
+ if len(m.Tolerations) > 0 {
+ for _, e := range m.Tolerations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
-func (m *ResourcePool) Size() (n int) {
+func (m *DeviceTaint) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Name)
+ l = len(m.Key)
n += 1 + l + sovGenerated(uint64(l))
- n += 1 + sovGenerated(uint64(m.Generation))
- n += 1 + sovGenerated(uint64(m.ResourceSliceCount))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Effect)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TimeAdded != nil {
+ l = m.TimeAdded.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
-func (m *ResourceSlice) Size() (n int) {
+func (m *DeviceTaintRule) Size() (n int) {
if m == nil {
return 0
}
@@ -3276,7 +4214,7 @@ func (m *ResourceSlice) Size() (n int) {
return n
}
-func (m *ResourceSliceList) Size() (n int) {
+func (m *DeviceTaintRuleList) Size() (n int) {
if m == nil {
return 0
}
@@ -3293,25 +4231,45 @@ func (m *ResourceSliceList) Size() (n int) {
return n
}
-func (m *ResourceSliceSpec) Size() (n int) {
+func (m *DeviceTaintRuleSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Driver)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Pool.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.NodeName)
+ if m.DeviceSelector != nil {
+ l = m.DeviceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Taint.Size()
n += 1 + l + sovGenerated(uint64(l))
- if m.NodeSelector != nil {
- l = m.NodeSelector.Size()
+ return n
+}
+
+func (m *DeviceTaintSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DeviceClassName != nil {
+ l = len(*m.DeviceClassName)
n += 1 + l + sovGenerated(uint64(l))
}
- n += 2
- if len(m.Devices) > 0 {
- for _, e := range m.Devices {
+ if m.Driver != nil {
+ l = len(*m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Pool != nil {
+ l = len(*m.Pool)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Device != nil {
+ l = len(*m.Device)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
@@ -3319,15 +4277,273 @@ func (m *ResourceSliceSpec) Size() (n int) {
return n
}
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *AllocatedDeviceStatus) String() string {
- if this == nil {
- return "nil"
+func (m *DeviceToleration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operator)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Effect)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TolerationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TolerationSeconds))
+ }
+ return n
+}
+
+func (m *NetworkDeviceData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.InterfaceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.IPs) > 0 {
+ for _, s := range m.IPs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.HardwareAddress)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *OpaqueDeviceConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Parameters.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaim) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimConsumerReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceClaimSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Devices.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Allocation != nil {
+ l = m.Allocation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ReservedFor) > 0 {
+ for _, e := range m.ReservedFor {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Devices) > 0 {
+ for _, e := range m.Devices {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceClaimTemplate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimTemplateList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceClaimTemplateSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourcePool) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Generation))
+ n += 1 + sovGenerated(uint64(m.ResourceSliceCount))
+ return n
+}
+
+func (m *ResourceSlice) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceSliceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceSliceSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Pool.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.NodeSelector != nil {
+ l = m.NodeSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if len(m.Devices) > 0 {
+ for _, e := range m.Devices {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.PerDeviceNodeSelection != nil {
+ n += 2
+ }
+ if len(m.SharedCounters) > 0 {
+ for _, e := range m.SharedCounters {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AllocatedDeviceStatus) String() string {
+ if this == nil {
+ return "nil"
}
repeatedStringForConditions := "[]Condition{"
for _, f := range this.Conditions {
@@ -3339,7 +4555,7 @@ func (this *AllocatedDeviceStatus) String() string {
`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
- `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1) + `,`,
`NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`,
`}`,
}, "")
@@ -3360,7 +4576,17 @@ func (this *BasicDevice) String() string {
if this == nil {
return "nil"
}
- keysForAttributes := make([]string, 0, len(this.Attributes))
+ repeatedStringForConsumesCounters := "[]DeviceCounterConsumption{"
+ for _, f := range this.ConsumesCounters {
+ repeatedStringForConsumesCounters += strings.Replace(strings.Replace(f.String(), "DeviceCounterConsumption", "DeviceCounterConsumption", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConsumesCounters += "}"
+ repeatedStringForTaints := "[]DeviceTaint{"
+ for _, f := range this.Taints {
+ repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "DeviceTaint", "DeviceTaint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTaints += "}"
+ keysForAttributes := make([]string, 0, len(this.Attributes))
for k := range this.Attributes {
keysForAttributes = append(keysForAttributes, string(k))
}
@@ -3383,6 +4609,11 @@ func (this *BasicDevice) String() string {
s := strings.Join([]string{`&BasicDevice{`,
`Attributes:` + mapStringForAttributes + `,`,
`Capacity:` + mapStringForCapacity + `,`,
+ `ConsumesCounters:` + repeatedStringForConsumesCounters + `,`,
+ `NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
+ `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
+ `AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`,
+ `Taints:` + repeatedStringForTaints + `,`,
`}`,
}, "")
return s
@@ -3397,6 +4628,37 @@ func (this *CELDeviceSelector) String() string {
}, "")
return s
}
+func (this *Counter) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Counter{`,
+ `Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CounterSet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForCounters := make([]string, 0, len(this.Counters))
+ for k := range this.Counters {
+ keysForCounters = append(keysForCounters, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ mapStringForCounters := "map[string]Counter{"
+ for _, k := range keysForCounters {
+ mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
+ }
+ mapStringForCounters += "}"
+ s := strings.Join([]string{`&CounterSet{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Counters:` + mapStringForCounters + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *Device) String() string {
if this == nil {
return "nil"
@@ -3571,6 +4833,27 @@ func (this *DeviceConstraint) String() string {
}, "")
return s
}
+func (this *DeviceCounterConsumption) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForCounters := make([]string, 0, len(this.Counters))
+ for k := range this.Counters {
+ keysForCounters = append(keysForCounters, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ mapStringForCounters := "map[string]Counter{"
+ for _, k := range keysForCounters {
+ mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
+ }
+ mapStringForCounters += "}"
+ s := strings.Join([]string{`&DeviceCounterConsumption{`,
+ `CounterSet:` + fmt.Sprintf("%v", this.CounterSet) + `,`,
+ `Counters:` + mapStringForCounters + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *DeviceRequest) String() string {
if this == nil {
return "nil"
@@ -3580,6 +4863,16 @@ func (this *DeviceRequest) String() string {
repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
}
repeatedStringForSelectors += "}"
+ repeatedStringForFirstAvailable := "[]DeviceSubRequest{"
+ for _, f := range this.FirstAvailable {
+ repeatedStringForFirstAvailable += strings.Replace(strings.Replace(f.String(), "DeviceSubRequest", "DeviceSubRequest", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForFirstAvailable += "}"
+ repeatedStringForTolerations := "[]DeviceToleration{"
+ for _, f := range this.Tolerations {
+ repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTolerations += "}"
s := strings.Join([]string{`&DeviceRequest{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
@@ -3587,6 +4880,8 @@ func (this *DeviceRequest) String() string {
`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
`AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
+ `FirstAvailable:` + repeatedStringForFirstAvailable + `,`,
+ `Tolerations:` + repeatedStringForTolerations + `,`,
`}`,
}, "")
return s
@@ -3595,12 +4890,18 @@ func (this *DeviceRequestAllocationResult) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForTolerations := "[]DeviceToleration{"
+ for _, f := range this.Tolerations {
+ repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTolerations += "}"
s := strings.Join([]string{`&DeviceRequestAllocationResult{`,
`Request:` + fmt.Sprintf("%v", this.Request) + `,`,
`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
`AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
+ `Tolerations:` + repeatedStringForTolerations + `,`,
`}`,
}, "")
return s
@@ -3615,6 +4916,115 @@ func (this *DeviceSelector) String() string {
}, "")
return s
}
+func (this *DeviceSubRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSelectors := "[]DeviceSelector{"
+ for _, f := range this.Selectors {
+ repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSelectors += "}"
+ repeatedStringForTolerations := "[]DeviceToleration{"
+ for _, f := range this.Tolerations {
+ repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTolerations += "}"
+ s := strings.Join([]string{`&DeviceSubRequest{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
+ `Selectors:` + repeatedStringForSelectors + `,`,
+ `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
+ `Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+ `Tolerations:` + repeatedStringForTolerations + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceTaint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceTaint{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
+ `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceTaintRule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceTaintRule{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceTaintRuleSpec", "DeviceTaintRuleSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceTaintRuleList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]DeviceTaintRule{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceTaintRule", "DeviceTaintRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DeviceTaintRuleList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceTaintRuleSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceTaintRuleSpec{`,
+ `DeviceSelector:` + strings.Replace(this.DeviceSelector.String(), "DeviceTaintSelector", "DeviceTaintSelector", 1) + `,`,
+ `Taint:` + strings.Replace(strings.Replace(this.Taint.String(), "DeviceTaint", "DeviceTaint", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceTaintSelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSelectors := "[]DeviceSelector{"
+ for _, f := range this.Selectors {
+ repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSelectors += "}"
+ s := strings.Join([]string{`&DeviceTaintSelector{`,
+ `DeviceClassName:` + valueToStringGenerated(this.DeviceClassName) + `,`,
+ `Driver:` + valueToStringGenerated(this.Driver) + `,`,
+ `Pool:` + valueToStringGenerated(this.Pool) + `,`,
+ `Device:` + valueToStringGenerated(this.Device) + `,`,
+ `Selectors:` + repeatedStringForSelectors + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceToleration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceToleration{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
+ `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *NetworkDeviceData) String() string {
if this == nil {
return "nil"
@@ -3797,6 +5207,11 @@ func (this *ResourceSliceSpec) String() string {
repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + ","
}
repeatedStringForDevices += "}"
+ repeatedStringForSharedCounters := "[]CounterSet{"
+ for _, f := range this.SharedCounters {
+ repeatedStringForSharedCounters += strings.Replace(strings.Replace(f.String(), "CounterSet", "CounterSet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSharedCounters += "}"
s := strings.Join([]string{`&ResourceSliceSpec{`,
`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
`Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`,
@@ -3804,6 +5219,8 @@ func (this *ResourceSliceSpec) String() string {
`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
`AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`,
`Devices:` + repeatedStringForDevices + `,`,
+ `PerDeviceNodeSelection:` + valueToStringGenerated(this.PerDeviceNodeSelection) + `,`,
+ `SharedCounters:` + repeatedStringForSharedCounters + `,`,
`}`,
}, "")
return s
@@ -3813,10 +5230,1915 @@ func valueToStringGenerated(v interface{}) string {
if rv.IsNil() {
return "nil"
}
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Pool = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Device = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Data == nil {
+ m.Data = &runtime.RawExtension{}
+ }
+ if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NetworkData == nil {
+ m.NetworkData = &NetworkDeviceData{}
+ }
+ if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AllocationResult) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeSelector == nil {
+ m.NodeSelector = &v11.NodeSelector{}
+ }
+ if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BasicDevice) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Attributes == nil {
+ m.Attributes = make(map[QualifiedName]DeviceAttribute)
+ }
+ var mapkey QualifiedName
+ mapvalue := &DeviceAttribute{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &DeviceAttribute{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Attributes[QualifiedName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Capacity == nil {
+ m.Capacity = make(map[QualifiedName]resource.Quantity)
+ }
+ var mapkey QualifiedName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Capacity[QualifiedName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsumesCounters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConsumesCounters = append(m.ConsumesCounters, DeviceCounterConsumption{})
+ if err := m.ConsumesCounters[len(m.ConsumesCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NodeName = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeSelector == nil {
+ m.NodeSelector = &v11.NodeSelector{}
+ }
+ if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AllNodes = &b
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Taints = append(m.Taints, DeviceTaint{})
+ if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Counter) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Counter: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CounterSet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CounterSet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CounterSet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Counters == nil {
+ m.Counters = make(map[string]Counter)
+ }
+ var mapkey string
+ mapvalue := &Counter{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &Counter{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Counters[mapkey] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Device) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Device: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Basic == nil {
+ m.Basic = &BasicDevice{}
+ }
+ if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Results = append(m.Results, DeviceRequestAllocationResult{})
+ if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Config = append(m.Config, DeviceAllocationConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IntValue = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.BoolValue = &b
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.StringValue = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.VersionValue = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Requests = append(m.Requests, DeviceRequest{})
+ if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Constraints = append(m.Constraints, DeviceConstraint{})
+ if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Config = append(m.Config, DeviceClaimConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
}
-func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
+func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3839,15 +7161,15 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -3875,13 +7197,13 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Driver = string(dAtA[iNdEx:postIndex])
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3891,29 +7213,80 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Pool = string(dAtA[iNdEx:postIndex])
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 3:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClass) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3923,27 +7296,28 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Device = string(dAtA[iNdEx:postIndex])
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 4:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3970,14 +7344,63 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Conditions = append(m.Conditions, v1.Condition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 5:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4004,13 +7427,63 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 6:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4037,10 +7510,41 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NetworkData == nil {
- m.NetworkData = &NetworkDeviceData{}
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DeviceClass{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4065,7 +7569,7 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *AllocationResult) Unmarshal(dAtA []byte) error {
+func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4088,15 +7592,15 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4123,13 +7627,14 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 3:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4156,10 +7661,8 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NodeSelector == nil {
- m.NodeSelector = &v11.NodeSelector{}
- }
- if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Config = append(m.Config, DeviceClassConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4184,7 +7687,7 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *BasicDevice) Unmarshal(dAtA []byte) error {
+func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4207,144 +7710,15 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Attributes == nil {
- m.Attributes = make(map[QualifiedName]DeviceAttribute)
- }
- var mapkey QualifiedName
- mapvalue := &DeviceAttribute{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &DeviceAttribute{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Attributes[QualifiedName(mapkey)] = *mapvalue
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4371,105 +7745,12 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Capacity == nil {
- m.Capacity = make(map[QualifiedName]resource.Quantity)
+ if m.Opaque == nil {
+ m.Opaque = &OpaqueDeviceConfiguration{}
}
- var mapkey QualifiedName
- mapvalue := &resource.Quantity{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+ if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.Capacity[QualifiedName(mapkey)] = *mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4492,7 +7773,7 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
+func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4515,15 +7796,15 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4551,7 +7832,40 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Expression = string(dAtA[iNdEx:postIndex])
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := FullyQualifiedName(dAtA[iNdEx:postIndex])
+ m.MatchAttribute = &s
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4574,7 +7888,7 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *Device) Unmarshal(dAtA []byte) error {
+func (m *DeviceCounterConsumption) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4597,15 +7911,15 @@ func (m *Device) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: Device: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceCounterConsumption: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceCounterConsumption: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CounterSet", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4633,11 +7947,11 @@ func (m *Device) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.CounterSet = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4664,12 +7978,105 @@ func (m *Device) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Basic == nil {
- m.Basic = &BasicDevice{}
+ if m.Counters == nil {
+ m.Counters = make(map[string]Counter)
}
- if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ var mapkey string
+ mapvalue := &Counter{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &Counter{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
+ m.Counters[mapkey] = *mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4692,7 +8099,7 @@ func (m *Device) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
+func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4715,15 +8122,15 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4751,11 +8158,11 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4783,11 +8190,11 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ m.DeviceClassName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4814,63 +8221,86 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
}
- if (skippy < 0) || (iNdEx+skippy) < 0 {
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- if (iNdEx + skippy) > l {
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
return io.ErrUnexpectedEOF
}
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Count |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
}
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AdminAccess = &b
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field FirstAvailable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4897,14 +8327,14 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Results = append(m.Results, DeviceRequestAllocationResult{})
- if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.FirstAvailable = append(m.FirstAvailable, DeviceSubRequest{})
+ if err := m.FirstAvailable[len(m.FirstAvailable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 2:
+ case 8:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4931,8 +8361,8 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Config = append(m.Config, DeviceAllocationConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Tolerations = append(m.Tolerations, DeviceToleration{})
+ if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4957,7 +8387,7 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
+func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4980,17 +8410,17 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
}
- var v int64
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5000,17 +8430,29 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int64(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.IntValue = &v
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
}
- var v int
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Request = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5020,16 +8462,27 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- b := bool(v != 0)
- m.BoolValue = &b
- case 4:
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5057,12 +8510,11 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.StringValue = &s
+ m.Pool = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 5:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5090,8 +8542,62 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.VersionValue = &s
+ m.Device = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AdminAccess = &b
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tolerations = append(m.Tolerations, DeviceToleration{})
+ if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5114,7 +8620,7 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
+func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5137,49 +8643,15 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Requests = append(m.Requests, DeviceRequest{})
- if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5206,42 +8678,10 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Constraints = append(m.Constraints, DeviceConstraint{})
- if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ if m.CEL == nil {
+ m.CEL = &CELDeviceSelector{}
}
- m.Config = append(m.Config, DeviceClaimConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5266,7 +8706,7 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
+func (m *DeviceSubRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5289,15 +8729,47 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceSubRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceSubRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5325,11 +8797,11 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ m.DeviceClassName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5356,65 +8828,16 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClass) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5424,28 +8847,46 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Count |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5472,7 +8913,8 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Tolerations = append(m.Tolerations, DeviceToleration{})
+ if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5497,7 +8939,7 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaint) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5520,17 +8962,17 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5540,80 +8982,61 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
}
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
}
- if iNdEx >= l {
+ if postIndex > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5623,28 +9046,27 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5671,8 +9093,10 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, DeviceClass{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.TimeAdded == nil {
+ m.TimeAdded = &v1.Time{}
+ }
+ if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5697,7 +9121,7 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintRule) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5720,15 +9144,15 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintRule: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintRule: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5755,14 +9179,13 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5789,8 +9212,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Config = append(m.Config, DeviceClassConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5815,7 +9237,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintRuleList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5838,15 +9260,15 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintRuleList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintRuleList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5873,10 +9295,41 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Opaque == nil {
- m.Opaque = &OpaqueDeviceConfiguration{}
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
}
- if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Items = append(m.Items, DeviceTaintRule{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5901,7 +9354,7 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintRuleSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5924,17 +9377,17 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintRuleSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintRuleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceSelector", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5944,29 +9397,33 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ if m.DeviceSelector == nil {
+ m.DeviceSelector = &DeviceTaintSelector{}
+ }
+ if err := m.DeviceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Taint", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5976,24 +9433,24 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := FullyQualifiedName(dAtA[iNdEx:postIndex])
- m.MatchAttribute = &s
+ if err := m.Taint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -6016,7 +9473,7 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6039,15 +9496,15 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintSelector: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintSelector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6075,11 +9532,12 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.DeviceClassName = &s
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6107,13 +9565,14 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.DeviceClassName = string(dAtA[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.Driver = &s
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6123,29 +9582,28 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Pool = &s
iNdEx = postIndex
case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6173,13 +9631,14 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
+ s := string(dAtA[iNdEx:postIndex])
+ m.Device = &s
iNdEx = postIndex
case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
}
- m.Count = 0
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6189,32 +9648,26 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Count |= int64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
}
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
}
- b := bool(v != 0)
- m.AdminAccess = &b
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6236,7 +9689,7 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
+func (m *DeviceToleration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6259,15 +9712,15 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceToleration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceToleration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6295,11 +9748,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Request = string(dAtA[iNdEx:postIndex])
+ m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6327,11 +9780,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Driver = string(dAtA[iNdEx:postIndex])
+ m.Operator = DeviceTolerationOperator(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6359,11 +9812,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Pool = string(dAtA[iNdEx:postIndex])
+ m.Value = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6391,84 +9844,13 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Device = string(dAtA[iNdEx:postIndex])
+ m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.AdminAccess = &b
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType)
}
- var msglen int
+ var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6478,28 +9860,12 @@ func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.CEL == nil {
- m.CEL = &CELDeviceSelector{}
- }
- if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
+ m.TolerationSeconds = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -8381,6 +11747,61 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PerDeviceNodeSelection", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.PerDeviceNodeSelection = &b
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SharedCounters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SharedCounters = append(m.SharedCounters, CounterSet{})
+ if err := m.SharedCounters[len(m.SharedCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
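All of the hand-rolled loops in the generated Unmarshal methods above decode protobuf base-128 varints: each byte contributes its low seven bits, least-significant group first, and a byte below 0x80 terminates the value. A minimal standalone Go sketch of that step (illustrative only; decodeVarint is not part of the vendored code):

    package main

    import (
        "errors"
        "fmt"
    )

    // decodeVarint mirrors the inlined loops in the generated code: each byte
    // contributes its low seven bits, and a byte below 0x80 ends the value.
    func decodeVarint(data []byte) (value uint64, n int, err error) {
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                // Corresponds to ErrIntOverflowGenerated in the vendored code.
                return 0, 0, errors.New("varint overflows 64 bits")
            }
            if n >= len(data) {
                // Corresponds to io.ErrUnexpectedEOF in the vendored code.
                return 0, 0, errors.New("unexpected end of input")
            }
            b := data[n]
            n++
            value |= uint64(b&0x7F) << shift
            if b < 0x80 {
                return value, n, nil
            }
        }
    }

    func main() {
        // 300 encodes as 0xAC 0x02: 0x2C | 0x02<<7 = 44 + 256.
        v, n, err := decodeVarint([]byte{0xAC, 0x02})
        fmt.Println(v, n, err) // 300 2 <nil>
    }

The generated code then interprets the decoded value either as a field tag (field number in the high bits, wire type in the low three bits) or as a length prefix, exactly as in the switch statements above.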
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
index e802a0143..103cafc6a 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
@@ -62,6 +62,8 @@ message AllocatedDeviceStatus {
// If the device has been configured according to the class and claim
// config references, the `Ready` condition should be True.
//
+ // Must not contain more than 8 entries.
+ //
// +optional
// +listType=map
// +listMapKey=type
@@ -111,6 +113,64 @@ message BasicDevice {
//
// +optional
map<string, DeviceCapacity> capacity = 2;
+
+ // ConsumesCounters defines a list of references to sharedCounters
+ // and the set of counters that the device will
+ // consume from those counter sets.
+ //
+ // There can only be a single entry per counterSet.
+ //
+ // The total number of device counter consumption entries
+ // must be <= 32. In addition, the total number in the
+ // entire ResourceSlice must be <= 1024 (for example,
+ // 64 devices with 16 counters each).
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRAPartitionableDevices
+ repeated DeviceCounterConsumption consumesCounters = 3;
+
+ // NodeName identifies the node where the device is available.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ //
+ // +optional
+ // +oneOf=DeviceNodeSelection
+ // +featureGate=DRAPartitionableDevices
+ optional string nodeName = 4;
+
+ // NodeSelector defines the nodes where the device is available.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ //
+ // +optional
+ // +oneOf=DeviceNodeSelection
+ // +featureGate=DRAPartitionableDevices
+ optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 5;
+
+ // AllNodes indicates that all nodes have access to the device.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ //
+ // +optional
+ // +oneOf=DeviceNodeSelection
+ // +featureGate=DRAPartitionableDevices
+ optional bool allNodes = 6;
+
+ // If specified, these are the driver-defined taints.
+ //
+ // The maximum number of taints is 4.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRADeviceTaints
+ repeated DeviceTaint taints = 7;
}
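The three DeviceNodeSelection fields above form a oneOf that is only honored when Spec.PerDeviceNodeSelection is true on the enclosing ResourceSlice. A hedged sketch of a driver publishing a node-local device, assuming the Go types in k8s.io/api/resource/v1alpha3 mirror this schema (struct and field names are not verified against the vendored Go API; driver and node names are invented):

    package example

    import (
        resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func nodeLocalSlice() resourcev1alpha3.ResourceSlice {
        perDevice := true
        nodeName := "worker-1" // hypothetical node
        return resourcev1alpha3.ResourceSlice{
            ObjectMeta: metav1.ObjectMeta{Name: "gpu-slice-worker-1"},
            Spec: resourcev1alpha3.ResourceSliceSpec{
                Driver: "gpu.example.com", // hypothetical driver
                Pool:   resourcev1alpha3.ResourcePool{Name: "worker-1", ResourceSliceCount: 1},
                // Must be true for the per-device NodeName below to be valid.
                PerDeviceNodeSelection: &perDevice,
                Devices: []resourcev1alpha3.Device{{
                    Name: "gpu-0",
                    Basic: &resourcev1alpha3.BasicDevice{
                        // At most one of NodeName, NodeSelector, AllNodes may be set.
                        NodeName: &nodeName,
                    },
                }},
            },
        }
    }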
// CELDeviceSelector contains a CEL expression for selecting a device.
@@ -170,6 +230,42 @@ message CELDeviceSelector {
optional string expression = 1;
}
+// Counter describes a quantity associated with a device.
+message Counter {
+ // Value defines how much of a certain device counter is available.
+ //
+ // +required
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+}
+
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourceSlice.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
+message CounterSet {
+ // CounterSet is the name of the set from which the
+ // counters defined will be consumed.
+ //
+ // +required
+ optional string name = 1;
+
+ // Counters defines the counters that will be consumed by the device.
+ // The name of each counter must be unique in that set and must be a DNS label.
+ //
+ // To ensure this uniqueness, capacities defined by the vendor
+ // must be listed without the driver name as domain prefix in
+ // their name. All others must be listed with their domain prefix.
+ //
+ // The maximum number of counters is 32.
+ //
+ // +required
+ map<string, Counter> counters = 2;
+}
+
// Device represents one individual hardware instance that can be selected based
// on its attributes. Besides the name, exactly one field must be set.
message Device {
@@ -198,6 +294,10 @@ message DeviceAllocationConfiguration {
// Requests lists the names of requests where the configuration applies.
// If empty, it applies to all requests.
//
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the configuration applies to all subrequests.
+ //
// +optional
// +listType=atomic
repeated string requests = 2;
@@ -284,6 +384,10 @@ message DeviceClaimConfiguration {
// Requests lists the names of requests where the configuration applies.
// If empty, it applies to all requests.
//
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the configuration applies to all subrequests.
+ //
// +optional
// +listType=atomic
repeated string requests = 1;
@@ -368,6 +472,10 @@ message DeviceConstraint {
// constraint. If this is not specified, this constraint applies to all
// requests in this claim.
//
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the constraint applies to all subrequests.
+ //
// +optional
// +listType=atomic
repeated string requests = 1;
@@ -390,14 +498,30 @@ message DeviceConstraint {
optional string matchAttribute = 2;
}
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
+message DeviceCounterConsumption {
+ // CounterSet defines the set from which the
+ // counters defined will be consumed.
+ //
+ // +required
+ optional string counterSet = 1;
+
+ // Counters defines the Counter that will be consumed by
+ // the device.
+ //
+ // The maximum number of counters in a device is 32.
+ // In addition, the maximum number of all counters
+ // in all devices is 1024 (for example, 64 devices with
+ // 16 counters each).
+ //
+ // +required
+ map<string, Counter> counters = 2;
+}
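To make the counter flow concrete: a CounterSet published via ResourceSliceSpec.SharedCounters provides capacity, and each device's ConsumesCounters draws it down when the device is allocated. A hedged Go sketch (type and field names assumed to mirror the proto messages above; the names and quantities are invented):

    package example

    import (
        resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // One shared pool of GPU memory for the whole physical card...
    var sharedCounters = []resourcev1alpha3.CounterSet{{
        Name: "gpu-0-counters",
        Counters: map[string]resourcev1alpha3.Counter{
            "memory": {Value: resource.MustParse("16Gi")},
        },
    }}

    // ...and one partition that consumes half of it. Allocating this device
    // leaves only 8Gi of the "memory" counter for sibling partitions.
    var halfGPU = resourcev1alpha3.BasicDevice{
        ConsumesCounters: []resourcev1alpha3.DeviceCounterConsumption{{
            CounterSet: "gpu-0-counters",
            Counters: map[string]resourcev1alpha3.Counter{
                "memory": {Value: resource.MustParse("8Gi")},
            },
        }},
    }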
+
// DeviceRequest is a request for devices required for a claim.
// This is typically a request for a single resource like a device, but can
// also ask for several identical devices.
-//
-// A DeviceClassName is currently required. Clients must check that it is
-// indeed set. It's absence indicates that something changed in a way that
-// is not supported by the client yet, in which case it must refuse to
-// handle the request.
message DeviceRequest {
// Name can be used to reference this request in a pod.spec.containers[].resources.claims
// entry and in a constraint of the claim.
@@ -411,7 +535,10 @@ message DeviceRequest {
// additional configuration and selectors to be inherited by this
// request.
//
- // A class is required. Which classes are available depends on the cluster.
+ // A class is required if no subrequests are specified in the
+ // firstAvailable list and no class can be set if subrequests
+ // are specified in the firstAvailable list.
+ // Which classes are available depends on the cluster.
//
// Administrators may use this to restrict which devices may get
// requested by only installing classes with selectors for permitted
@@ -419,7 +546,8 @@ message DeviceRequest {
// then administrators can create an empty DeviceClass for users
// to reference.
//
- // +required
+ // +optional
+ // +oneOf=deviceRequestType
optional string deviceClassName = 2;
// Selectors define criteria which must be satisfied by a specific
@@ -427,6 +555,9 @@ message DeviceRequest {
// request. All selectors must be satisfied for a device to be
// considered.
//
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
// +optional
// +listType=atomic
repeated DeviceSelector selectors = 3;
@@ -439,13 +570,17 @@ message DeviceRequest {
// count field.
//
// - All: This request is for all of the matching devices in a pool.
+ // At least one device must exist on the node for the allocation to succeed.
// Allocation will fail if some devices are already allocated,
// unless adminAccess is requested.
//
- // If AlloctionMode is not specified, the default mode is ExactCount. If
+ // If AllocationMode is not specified, the default mode is ExactCount. If
// the mode is ExactCount and count is not specified, the default count is
// one. Any other requests must specify this field.
//
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
// More modes may get added in the future. Clients must refuse to handle
// requests with unknown modes.
//
@@ -455,6 +590,9 @@ message DeviceRequest {
// Count is used only when the count mode is "ExactCount". Must be greater than zero.
// If AllocationMode is ExactCount and this field is not specified, the default is one.
//
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
// +optional
// +oneOf=AllocationMode
optional int64 count = 5;
@@ -465,6 +603,9 @@ message DeviceRequest {
// all ordinary claims to the device with respect to access modes and
// any resource allocations.
//
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
// This is an alpha field and requires enabling the DRAAdminAccess
// feature gate. Admin access is disabled if this field is unset or
// set to false, otherwise it is enabled.
@@ -472,13 +613,65 @@ message DeviceRequest {
// +optional
// +featureGate=DRAAdminAccess
optional bool adminAccess = 6;
+
+ // FirstAvailable contains subrequests, of which exactly one will be
+ // satisfied by the scheduler to satisfy this request. It tries to
+ // satisfy them in the order in which they are listed here. So if
+ // there are two entries in the list, the scheduler will only check
+ // the second one if it determines that the first one cannot be used.
+ //
+ // This field may only be set in the entries of DeviceClaim.Requests.
+ //
+ // DRA does not yet implement scoring, so the scheduler will
+ // select the first set of devices that satisfies all the
+ // requests in the claim. And if the requirements can
+ // be satisfied on more than one node, other scheduling features
+ // will determine which node is chosen. This means that the set of
+ // devices allocated to a claim might not be the optimal set
+ // available to the cluster. Scoring will be implemented later.
+ //
+ // +optional
+ // +oneOf=deviceRequestType
+ // +listType=atomic
+ // +featureGate=DRAPrioritizedList
+ repeated DeviceSubRequest firstAvailable = 7;
+
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRADeviceTaints
+ repeated DeviceToleration tolerations = 8;
}
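To illustrate the firstAvailable and tolerations fields documented above: a request that prefers two large GPUs but falls back to a single one, with the fallback tolerating a hypothetical NoSchedule taint. A sketch assuming the Go types and constant names mirror this schema (class name and taint key are invented):

    package example

    import resourcev1alpha3 "k8s.io/api/resource/v1alpha3"

    var gpuRequest = resourcev1alpha3.DeviceRequest{
        Name: "gpu",
        // With FirstAvailable set, DeviceClassName, Selectors, AllocationMode,
        // Count, AdminAccess and Tolerations must stay unset at this level.
        FirstAvailable: []resourcev1alpha3.DeviceSubRequest{
            {
                Name:            "two-gpus",
                DeviceClassName: "large-gpu.example.com", // hypothetical class
                AllocationMode:  resourcev1alpha3.DeviceAllocationModeExactCount,
                Count:           2,
            },
            {
                Name:            "one-gpu",
                DeviceClassName: "large-gpu.example.com",
                // AllocationMode defaults to ExactCount with a count of one.
                Tolerations: []resourcev1alpha3.DeviceToleration{{
                    Key:      "example.com/preemptible", // invented taint key
                    Operator: resourcev1alpha3.DeviceTolerationOpExists,
                    Effect:   resourcev1alpha3.DeviceTaintEffectNoSchedule,
                }},
            },
        },
    }

If the scheduler can only satisfy the fallback, the resulting DeviceRequestAllocationResult.Request field would read "gpu/one-gpu", following the <main request>/<subrequest> format described below.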
// DeviceRequestAllocationResult contains the allocation result for one request.
message DeviceRequestAllocationResult {
// Request is the name of the request in the claim which caused this
- // device to be allocated. Multiple devices may have been allocated
- // per request.
+ // device to be allocated. If it references a subrequest in the
+ // firstAvailable list on a DeviceRequest, this field must
+ // include both the name of the main request and the subrequest
+ // using the format <main request>/<subrequest>.
+ //
+ // Multiple devices may have been allocated per request.
//
// +required
optional string request = 1;
@@ -519,6 +712,19 @@ message DeviceRequestAllocationResult {
// +optional
// +featureGate=DRAAdminAccess
optional bool adminAccess = 5;
+
+ // A copy of all tolerations specified in the request at the time
+ // when the device got allocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRADeviceTaints
+ repeated DeviceToleration tolerations = 6;
}
// DeviceSelector must have exactly one field set.
@@ -530,6 +736,262 @@ message DeviceSelector {
optional CELDeviceSelector cel = 1;
}
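Since CEL is currently the only member of this oneOf, a selector is built by wrapping a CEL expression. A sketch with invented driver and attribute names; the device.driver/device.attributes vocabulary follows the upstream CELDeviceSelector documentation:

    package example

    import resourcev1alpha3 "k8s.io/api/resource/v1alpha3"

    var a100Only = resourcev1alpha3.DeviceSelector{
        CEL: &resourcev1alpha3.CELDeviceSelector{
            // device.attributes is keyed by attribute domain; the driver name
            // and "model" attribute are purely illustrative.
            Expression: `device.driver == "gpu.example.com" &&
                device.attributes["gpu.example.com"].model == "a100"`,
        },
    }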
+// DeviceSubRequest describes a request for a device provided in the
+// claim.spec.devices.requests[].firstAvailable array. Each
+// is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// DeviceSubRequest is similar to Request, but doesn't expose the AdminAccess
+// or FirstAvailable fields, as those can only be set on the top-level request.
+// AdminAccess is not supported for requests with a prioritized list, and
+// recursive FirstAvailable fields are not supported.
+message DeviceSubRequest {
+ // Name can be used to reference this subrequest in the list of constraints
+ // or the list of configurations for the claim. References must use the
+ // format <main request>/<subrequest>.
+ //
+ // Must be a DNS label.
+ //
+ // +required
+ optional string name = 1;
+
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // subrequest.
+ //
+ // A class is required. Which classes are available depends on the cluster.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ //
+ // +required
+ optional string deviceClassName = 2;
+
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // request. All selectors must be satisfied for a device to be
+ // considered.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceSelector selectors = 3;
+
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this request. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This request is for all of the matching devices in a pool.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other requests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ //
+ // +optional
+ optional string allocationMode = 4;
+
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ //
+ // +optional
+ // +oneOf=AllocationMode
+ optional int64 count = 5;
+
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRADeviceTaints
+ repeated DeviceToleration tolerations = 7;
+}
+
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
+message DeviceTaint {
+ // The taint key to be applied to a device.
+ // Must be a label name.
+ //
+ // +required
+ optional string key = 1;
+
+ // The taint value corresponding to the taint key.
+ // Must be a label value.
+ //
+ // +optional
+ optional string value = 2;
+
+ // The effect of the taint on claims that do not tolerate the taint
+ // and through such claims on the pods using them.
+ // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here.
+ //
+ // +required
+ optional string effect = 3;
+
+ // TimeAdded represents the time at which the taint was added.
+ // Added automatically during create or update if not set.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// DeviceTaintRule adds one taint to all devices which match the selector.
+// This has the same effect as if the taint was specified directly
+// in the ResourceSlice by the DRA driver.
+message DeviceTaintRule {
+ // Standard object metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec specifies the selector and one taint.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ optional DeviceTaintRuleSpec spec = 2;
+}
+
+// DeviceTaintRuleList is a collection of DeviceTaintRules.
+message DeviceTaintRuleList {
+ // Standard list metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of DeviceTaintRules.
+ repeated DeviceTaintRule items = 2;
+}
+
+// DeviceTaintRuleSpec specifies the selector and one taint.
+message DeviceTaintRuleSpec {
+ // DeviceSelector defines which device(s) the taint is applied to.
+ // All selector criteria must be satisfied for a device to
+ // match. The empty selector matches all devices. Without
+ // a selector, no devices are matched.
+ //
+ // +optional
+ optional DeviceTaintSelector deviceSelector = 1;
+
+ // The taint that gets applied to matching devices.
+ //
+ // +required
+ optional DeviceTaint taint = 2;
+}
+
+// DeviceTaintSelector defines which device(s) a DeviceTaintRule applies to.
+// The empty selector matches all devices. Without a selector, no devices
+// are matched.
+message DeviceTaintSelector {
+ // If DeviceClassName is set, the selectors defined there must be
+ // satisfied by a device to be selected. This field corresponds
+ // to class.metadata.name.
+ //
+ // +optional
+ optional string deviceClassName = 1;
+
+ // If driver is set, only devices from that driver are selected.
+ // This field corresponds to slice.spec.driver.
+ //
+ // +optional
+ optional string driver = 2;
+
+ // If pool is set, only devices in that pool are selected.
+ //
+ // Also setting the driver name may be useful to avoid
+ // ambiguity when different drivers use the same pool name,
+ // but this is not required because selecting pools from
+ // different drivers may also be useful, for example when
+ // drivers with node-local devices use the node name as
+ // their pool name.
+ //
+ // +optional
+ optional string pool = 3;
+
+ // If device is set, only devices with that name are selected.
+ // This field corresponds to slice.spec.devices[].name.
+ //
+ // Setting also driver and pool may be needed to avoid ambiguity,
+ // but is not required.
+ //
+ // +optional
+ optional string device = 4;
+
+ // Selectors contains the same selection criteria as a ResourceClaim.
+ // Currently, CEL expressions are supported. All of these selectors
+ // must be satisfied.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceSelector selectors = 5;
+}
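Tying DeviceTaintRule, DeviceTaintRuleSpec, DeviceTaintSelector and DeviceTaint together: a cluster-scoped rule that marks one driver's unhealthy device NoExecute might look like the sketch below (Go API assumed to mirror the proto; object, driver and key names are invented):

    package example

    import (
        resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func flakyGPURule() resourcev1alpha3.DeviceTaintRule {
        driver := "gpu.example.com"
        device := "gpu-0"
        return resourcev1alpha3.DeviceTaintRule{
            ObjectMeta: metav1.ObjectMeta{Name: "evict-flaky-gpu"},
            Spec: resourcev1alpha3.DeviceTaintRuleSpec{
                // Without a DeviceSelector no devices would match; driver
                // plus device name keeps the rule narrow.
                DeviceSelector: &resourcev1alpha3.DeviceTaintSelector{
                    Driver: &driver,
                    Device: &device,
                },
                Taint: resourcev1alpha3.DeviceTaint{
                    Key:    "example.com/unhealthy",
                    Effect: resourcev1alpha3.DeviceTaintEffectNoExecute,
                    // TimeAdded is filled in automatically on create/update.
                },
            },
        }
    }

Pods consuming claims that have this device allocated are evicted unless the claim carries a matching NoExecute toleration (see DeviceToleration below).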
+
+// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message DeviceToleration {
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // Must be a label name.
+ //
+ // +optional
+ optional string key = 1;
+
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a ResourceClaim can
+ // tolerate all taints of a particular category.
+ //
+ // +optional
+ // +default="Equal"
+ optional string operator = 2;
+
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value must be empty, otherwise just a regular string.
+ // Must be a label value.
+ //
+ // +optional
+ optional string value = 3;
+
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule and NoExecute.
+ //
+ // +optional
+ optional string effect = 4;
+
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // If larger than zero, the time when the pod needs to be evicted is calculated as